@@ -692,38 +692,29 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid)
  */
 size_t
 get_slot2(size_t slot1, uint64 hash) {
-	size_t cache_line_ep;		// ending point of a cache line
-	size_t cache_line_sp;		// starting point of a cache line
-	size_t cache_line_interval;
+	size_t memory_page_ep;		// ending point of a memory page
+	size_t memory_page_sp;		// starting point of a memory page
+	size_t memory_page_interval;
 	size_t slot2;
 
-	/* Get the ending point of a cache line within entries[]. */
-	cache_line_ep = (CACHE_LINE_ALIGN(offsetof(PtrackMapHdr, entries) + slot1 * sizeof(XLogRecPtr))
-					 - offsetof(PtrackMapHdr, entries)) / sizeof(XLogRecPtr);
+	/* Get the ending point of a memory page within entries[]. */
+	memory_page_ep = (MEMORY_PAGE_ALIGN(offsetof(PtrackMapHdr, entries) + slot1 * sizeof(uint32))
+					  - offsetof(PtrackMapHdr, entries)) / sizeof(uint32);
 	/* handling an overflow beyond the entries boundary */
-	cache_line_ep = cache_line_ep > PtrackContentNblocks ? PtrackContentNblocks : cache_line_ep;
+	memory_page_ep = memory_page_ep > PtrackContentNblocks ? PtrackContentNblocks : memory_page_ep;
 
 	/* Get the starting point of a cache line within entries[]. */
-	cache_line_sp = cache_line_ep - ENTRIES_PER_LINE;
+	memory_page_sp = memory_page_ep - ENTRIES_PER_PAGE;
 
 	/* Handling overflow below zero (sp then must be larger than ep) */
-	cache_line_sp = cache_line_sp > cache_line_ep ? 0 : cache_line_sp;
+	memory_page_sp = memory_page_sp > memory_page_ep ? 0 : memory_page_sp;
 
-	cache_line_interval = cache_line_ep - cache_line_sp;
-	slot2 = (size_t)(cache_line_sp + (((hash << 32) | (hash >> 32)) % cache_line_interval));
-	slot2 = (slot1 == slot2) ? ((slot1 + 1) % cache_line_interval) : slot2;
+	memory_page_interval = memory_page_ep - memory_page_sp;
+	slot2 = (size_t)(memory_page_sp + (((hash << 32) | (hash >> 32)) % memory_page_interval));
+	slot2 = (slot1 == slot2) ? ((slot1 + 1) % memory_page_interval) : slot2;
 	return slot2;
 }
 
-/*
- * Mark modified block in ptrack_map.
- */
-static void swap_slots(size_t *slot1, size_t *slot2) {
-	*slot1 ^= *slot2;
-	*slot2 = *slot1 ^ *slot2;
-	*slot1 = *slot1 ^ *slot2;
-}
-
 static void
 ptrack_mark_map_pair(size_t slot1, size_t slot2, uint32 new_lsn32)
 {
@@ -786,10 +777,7 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 	bid.blocknum = InvalidBlockNumber;
 	hash = BID_HASH_FUNC(bid);
 	max_lsn_slot1 = (size_t)(hash % PtrackContentNblocks);
-	max_lsn_slot2 = max_lsn_slot1 + 1;
-
-	if (max_lsn_slot2 < max_lsn_slot1)
-		swap_slots(&max_lsn_slot1, &max_lsn_slot2);
+	max_lsn_slot2 = (max_lsn_slot1 + 1) % PtrackContentNblocks;
 
 	if (RecoveryInProgress())
 		new_lsn = GetXLogReplayRecPtr(NULL);
@@ -830,10 +818,7 @@ XLogRecPtr ptrack_read_file_maxlsn(RelFileNode rnode, ForkNumber forknum)
 	hash = BID_HASH_FUNC(bid);
 
 	slot1 = (size_t)(hash % PtrackContentNblocks);
-	slot2 = slot1 + 1;
-
-	if (slot2 < slot1)
-		swap_slots(&slot1, &slot2);
+	slot2 = (slot1 + 1) % PtrackContentNblocks;
 
 	update_lsn1 = pg_atomic_read_u32(&ptrack_map->entries[slot1]);
 	update_lsn2 = pg_atomic_read_u32(&ptrack_map->entries[slot2]);
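
Taken together, the hunks do two things: get_slot2() now picks the second slot inside the memory page that holds the first slot (rather than inside a cache line), and the file-level max-LSN pair becomes a slot plus its wrap-around neighbour, which is why the XOR-based swap_slots() helper can be dropped. The standalone sketch below only illustrates that slot-pair arithmetic; the page size, entry count, header layout, and example hash are stand-in assumptions, not ptrack's real definitions.

/*
 * Minimal sketch of the slot-pair selection in the patch above.
 * All constants and the MapHdr layout are illustrative stand-ins.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MEMORY_PAGE_SIZE	4096	/* assumed OS page size */
#define MEMORY_PAGE_ALIGN(x) \
	(((x) + (MEMORY_PAGE_SIZE - 1)) & ~((size_t) (MEMORY_PAGE_SIZE - 1)))
#define ENTRIES_PER_PAGE	(MEMORY_PAGE_SIZE / sizeof(uint32_t))

/* Stand-in for PtrackMapHdr: a small header followed by the entries array. */
typedef struct
{
	char		magic[8];
	uint32_t	version;
	uint32_t	entries[];
} MapHdr;

static const size_t content_nblocks = 1024 * 1024;	/* stand-in for PtrackContentNblocks */

/* Second slot, confined to the memory page that contains slot1. */
static size_t
get_slot2(size_t slot1, uint64_t hash)
{
	size_t		page_ep;	/* end of the page-sized window, in entries (exclusive) */
	size_t		page_sp;	/* start of the page-sized window, in entries */
	size_t		interval;
	size_t		slot2;

	/* Ending point of the window within entries[], clamped to the array size. */
	page_ep = (MEMORY_PAGE_ALIGN(offsetof(MapHdr, entries) + slot1 * sizeof(uint32_t))
			   - offsetof(MapHdr, entries)) / sizeof(uint32_t);
	page_ep = page_ep > content_nblocks ? content_nblocks : page_ep;

	/* Starting point of the window; clamp to zero if the subtraction wrapped. */
	page_sp = page_ep - ENTRIES_PER_PAGE;
	page_sp = page_sp > page_ep ? 0 : page_sp;

	/* Derive slot2 from the rotated hash. */
	interval = page_ep - page_sp;
	slot2 = (size_t) (page_sp + (((hash << 32) | (hash >> 32)) % interval));
	/* If it collides with slot1, fall back to (slot1 + 1) % interval, as in the patch. */
	slot2 = (slot1 == slot2) ? ((slot1 + 1) % interval) : slot2;
	return slot2;
}

int
main(void)
{
	uint64_t	hash = 0x9e3779b97f4a7c15ULL;	/* arbitrary example hash */
	size_t		slot1 = (size_t) (hash % content_nblocks);
	size_t		slot2 = get_slot2(slot1, hash);

	/* File-level max-LSN pair: the next entry, wrapping at the array end. */
	size_t		max_lsn_slot1 = slot1;
	size_t		max_lsn_slot2 = (max_lsn_slot1 + 1) % content_nblocks;

	printf("block pair:    slot1=%zu slot2=%zu\n", slot1, slot2);
	printf("max-LSN pair:  slot1=%zu slot2=%zu\n", max_lsn_slot1, max_lsn_slot2);
	return 0;
}

The modulo in the max-LSN pair replaces the old overflow check plus swap_slots(): it keeps slot2 inside entries[] even when slot1 is the last slot, so the pair never needs reordering.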