From: Wesley Chalmers <wesley.chalm...@amd.com>

[WHY]
The SCR for the DP 2.0 spec says that multiple LTTPRs must not be
accessed in a single AUX transaction.
There may be other places in the future where breaking up AUX accesses
is necessary.

[HOW]
Partition the entire DPCD address space into blocks. When an incoming AUX
request spans multiple blocks, break up the request into multiple requests.
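
For illustration (addresses are hypothetical and assume the DP 1.4
register layout, where DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1)
is 0xf0010 and DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) is
0xf0060), a 16-byte read that straddles the LTTPR2 block boundary:

    core_link_read_dpcd(link, 0xf0058, data, 16);

is now issued as two AUX transactions, split at the block boundary
(ignoring any extension applied by dpcd_extend_address_range):

    internal_link_read_dpcd(link, 0xf0058, &data[0], 8);
    internal_link_read_dpcd(link, 0xf0060, &data[8], 8);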

Signed-off-by: Wesley Chalmers <wesley.chalm...@amd.com>
Reviewed-by: Jun Lei <jun....@amd.com>
Acked-by: Anson Jacob <anson.ja...@amd.com>
---
 .../drm/amd/display/dc/core/dc_link_dpcd.c    | 87 ++++++++++++++++++-
 include/drm/drm_dp_helper.h                   | 17 ++++
 2 files changed, 102 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
index 8957565f87bc..27ec1e6e9c43 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
@@ -43,6 +43,60 @@ static enum dc_status internal_link_write_dpcd(
        return DC_OK;
 }
 
+/*
+ * Partition the entire DPCD address space
+ * XXX: This partitioning must cover the entire DPCD address space,
+ * and must contain no gaps or overlapping address ranges.
+ */
+static const struct dpcd_address_range mandatory_dpcd_partitions[] = {
+       { 0, DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1) - 1},
+       { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) - 1 },
+       { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3) - 1 },
+       { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4) - 1 },
+       { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5) - 1 },
+       { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6) - 1 },
+       { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7) - 1 },
+       { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8) - 1 },
+       { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 },
+       /*
+        * The FEC registers are contiguous
+        */
+       { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2) - 1 },
+       { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3) - 1 },
+       { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4) - 1 },
+       { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5) - 1 },
+       { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6) - 1 },
+       { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7) - 1 },
+       { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR8) - 1 },
+       { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR8), DP_LTTPR_MAX_ADD },
+       /* all remaining DPCD addresses */
+       { DP_LTTPR_MAX_ADD + 1, DP_DPCD_MAX_ADD } };
+
+static inline bool do_addresses_intersect_with_range(
+               const struct dpcd_address_range *range,
+               const uint32_t start_address,
+               const uint32_t end_address)
+{
+       return start_address <= range->end && end_address >= range->start;
+}
+
+static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint32_t size)
+{
+       const uint32_t end_address = END_ADDRESS(address, size);
+       uint32_t partition_iterator = 0;
+
+       /*
+        * find current partition
+        * this loop spins forever if partition map above is not surjective
+        */
+       while (!do_addresses_intersect_with_range(&mandatory_dpcd_partitions[partition_iterator],
+                               address, end_address))
+               partition_iterator++;
+       if (end_address < mandatory_dpcd_partitions[partition_iterator].end)
+               return size;
+       return ADDRESS_RANGE_SIZE(address, mandatory_dpcd_partitions[partition_iterator].end);
+}
+
 /*
  * Ranges of DPCD addresses that must be read in a single transaction
  * XXX: Do not allow any two address ranges in this array to overlap
@@ -115,12 +169,28 @@ enum dc_status core_link_read_dpcd(
        uint32_t size)
 {
        uint32_t extended_address;
+       uint32_t partitioned_address;
        uint8_t *extended_data;
        uint32_t extended_size;
+       /* size of the remaining partitioned address space */
+       uint32_t size_left_to_read;
        enum dc_status status;
+       /* size of the next partition to be read from */
+       uint32_t partition_size;
+       uint32_t data_index = 0;
 
        dpcd_extend_address_range(address, data, size, &extended_address, &extended_data, &extended_size);
-       status = internal_link_read_dpcd(link, extended_address, extended_data, extended_size);
+       partitioned_address = extended_address;
+       size_left_to_read = extended_size;
+       while (size_left_to_read) {
+               partition_size = dpcd_get_next_partition_size(partitioned_address, size_left_to_read);
+               status = internal_link_read_dpcd(link, partitioned_address, &extended_data[data_index], partition_size);
+               if (status != DC_OK)
+                       break;
+               partitioned_address += partition_size;
+               data_index += partition_size;
+               size_left_to_read -= partition_size;
+       }
        dpcd_reduce_address_range(extended_address, extended_data, extended_size, address, data, size);
        return status;
 }
@@ -131,5 +201,18 @@ enum dc_status core_link_write_dpcd(
        const uint8_t *data,
        uint32_t size)
 {
-       return internal_link_write_dpcd(link, address, data, size);
+       uint32_t partition_size;
+       uint32_t data_index = 0;
+       enum dc_status status = DC_OK;
+
+       while (size) {
+               partition_size = dpcd_get_next_partition_size(address, size);
+               status = internal_link_write_dpcd(link, address, &data[data_index], partition_size);
+               if (status != DC_OK)
+                       break;
+               address += partition_size;
+               data_index += partition_size;
+               size -= partition_size;
+       }
+       return status;
 }
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index edffd1dcca3e..35ac1f537530 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1369,10 +1369,27 @@ enum drm_dp_phy {
 #define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1          0xf0037 /* 1.3 */
 #define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1          0xf0039 /* 1.3 */
 #define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1          0xf003b /* 1.3 */
+
+#define __DP_FEC1_BASE                                     0xf0290 /* 1.4 */
+#define __DP_FEC2_BASE                                     0xf0298 /* 1.4 */
+#define DP_FEC_BASE(dp_phy) \
+       (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \
+                          ((dp_phy) - DP_PHY_LTTPR1)))
+
+#define DP_FEC_REG(dp_phy, fec1_reg) \
+       (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg)
+
 #define DP_FEC_STATUS_PHY_REPEATER1                        0xf0290 /* 1.4 */
+#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \
+       DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1)
+
 #define DP_FEC_ERROR_COUNT_PHY_REPEATER1                    0xf0291 /* 1.4 */
 #define DP_FEC_CAPABILITY_PHY_REPEATER1                     0xf0294 /* 1.4a */
 
+#define DP_LTTPR_MAX_ADD                                   0xf02ff /* 1.4 */
+
+#define DP_DPCD_MAX_ADD                                            0xfffff /* 1.4 */
+
 /* Repeater modes */
 #define DP_PHY_REPEATER_MODE_TRANSPARENT                   0x55    /* 1.3 */
 #define DP_PHY_REPEATER_MODE_NON_TRANSPARENT               0xaa    /* 1.3 */
-- 
2.25.1
