diff --git a/.abi-compliance-history b/.abi-compliance-history new file mode 100644 index 0000000000000..6e84b96d85181 --- /dev/null +++ b/.abi-compliance-history @@ -0,0 +1,44 @@ +# Reference point for ABI compliance checks +# +# This file lists commits on the current branch that break ABI compatibility in +# ways that have been deemed acceptable (e.g., removing an extern function with +# no third-party uses). The primary intent of this file is to control the ABI +# compliance checks on the buildfarm, but it also serves as a central location +# to document the justification for each. +# +# In general, entries should be added reactively after an abi-compliance-check +# buildfarm failure. It is important to verify the details of the breakage +# match expectations, as the first entry listed will become the updated ABI +# baseline point. +# +# Add new entries by adding the output of the following to the top of the file: +# +# $ git log --pretty=format:"%H%n#%n# %s%n# %cd%n#%n# <explanation>" $ABIBREAKGITHASH -1 --date=iso +# +# Be sure to replace "<explanation>" with details of your change and +# why it is deemed acceptable. + +00eb646ea43410e5df77fed96f4a981e66811796 +# +# Check for CREATE privilege on the schema in CREATE STATISTICS. +# 2025-11-10 09:00:00 -0600 +# +# This commit added a parameter to CreateStatistics(). We are unaware of any +# impacted third-party code. + +c8af5019bee5c57502db830f8005a01cba60fee0 +# +# Fix lookups in pg_{clear,restore}_{attribute,relation}_stats(). +# 2025-10-15 12:47:33 -0500 +# +# This commit replaced two functions related to lookups/privilege checks for +# the new stats stuff in v18 with RangeVarGetRelidExtended(). These functions +# were not intended for use elsewhere, exist in exactly one release (18.0), and +# do not have any known third-party callers. + +9bbcec6030a2744d83311370ec92213fbd76e514 +# +# Translation updates +# 2025-09-22 14:18:56 +0200 +# +# This is the original ABI baseline point for REL_18_STABLE.
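As a concrete illustration of the format the new file defines ('#' lines are commentary, each bare line is a commit hash, and the first hash listed is the current ABI baseline), here is a minimal Python sketch of how checker tooling could extract the baseline. The script and its names are hypothetical, for illustration only; they are not part of this patch or of the buildfarm client.

import sys

def abi_baseline(path=".abi-compliance-history"):
    """Return the current ABI baseline commit hash.

    Per the file's header comment, '#' lines are commentary and the
    first non-comment, non-blank line is the updated ABI baseline point.
    """
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith("#"):
                return line
    return None

if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else ".abi-compliance-history"
    print(abi_baseline(path) or "no baseline found")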
diff --git a/.cirrus.star b/.cirrus.star index 36233872d1e50..e9bb672b95936 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -7,7 +7,7 @@ https://github.com/bazelbuild/starlark/blob/master/spec.md See also .cirrus.yml and src/tools/ci/README """ -load("cirrus", "env", "fs") +load("cirrus", "env", "fs", "re", "yaml") def main(): @@ -18,19 +18,36 @@ def main(): 1) the contents of .cirrus.yml - 2) if defined, the contents of the file referenced by the, repository + 2) computed environment variables + + 3) if defined, the contents of the file referenced by the, repository level, REPO_CI_CONFIG_GIT_URL variable (see https://cirrus-ci.org/guide/programming-tasks/#fs for the accepted format) - 3) .cirrus.tasks.yml + 4) .cirrus.tasks.yml """ output = "" # 1) is evaluated implicitly + # Add 2) + additional_env = compute_environment_vars() + env_fmt = """ +### +# Computed environment variables start here +### +{0} +### +# Computed environment variables end here +### +""" + output += env_fmt.format(yaml.dumps({'env': additional_env})) + + + # Add 3) repo_config_url = env.get("REPO_CI_CONFIG_GIT_URL") if repo_config_url != None: print("loading additional configuration from \"{}\"".format(repo_config_url)) @@ -38,12 +55,75 @@ def main(): else: output += "\n# REPO_CI_CONFIG_URL was not set\n" - # Add 3) + + # Add 4) output += config_from(".cirrus.tasks.yml") + return output +def compute_environment_vars(): + cenv = {} + + ### + # Some tasks are manually triggered by default because they might use too + # many resources for users of free Cirrus credits, but they can be + # triggered automatically by naming them in an environment variable e.g. + # REPO_CI_AUTOMATIC_TRIGGER_TASKS="task_name other_task" under "Repository + # Settings" on Cirrus CI's website. + + default_manual_trigger_tasks = ['mingw', 'netbsd', 'openbsd'] + + repo_ci_automatic_trigger_tasks = env.get('REPO_CI_AUTOMATIC_TRIGGER_TASKS', '') + for task in default_manual_trigger_tasks: + name = 'CI_TRIGGER_TYPE_' + task.upper() + if repo_ci_automatic_trigger_tasks.find(task) != -1: + value = 'automatic' + else: + value = 'manual' + cenv[name] = value + ### + + ### + # Parse "ci-os-only:" tag in commit message and set + # CI_{$OS}_ENABLED variable for each OS + + # We want to disable SanityCheck if testing just a specific OS. This + # shortens push-wait-for-ci cycle time a bit when debugging operating + # system specific failures. Just treating it as an OS in that case + # suffices. + + operating_systems = [ + 'compilerwarnings', + 'freebsd', + 'linux', + 'macos', + 'mingw', + 'netbsd', + 'openbsd', + 'sanitycheck', + 'windows', + ] + commit_message = env.get('CIRRUS_CHANGE_MESSAGE') + match_re = r"(^|.*\n)ci-os-only: ([^\n]+)($|\n.*)" + + # re.match() returns an array with a tuple of (matched-string, match_1, ...) + m = re.match(match_re, commit_message) + if m and len(m) > 0: + os_only = m[0][2] + os_only_list = re.split(r'[, ]+', os_only) + else: + os_only_list = operating_systems + + for os in operating_systems: + os_enabled = os in os_only_list + cenv['CI_{0}_ENABLED'.format(os.upper())] = os_enabled + ### + + return cenv + + def config_from(config_src): """return contents of config file `config_src`, surrounded by markers indicating start / end of the included file diff --git a/.cirrus.tasks.yml b/.cirrus.tasks.yml index 92057006c9309..dd24afd356701 100644 --- a/.cirrus.tasks.yml +++ b/.cirrus.tasks.yml @@ -72,13 +72,13 @@ task: # push-wait-for-ci cycle time a bit when debugging operating system specific # failures. 
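For reference, the ci-os-only selection that compute_environment_vars() implements above can be mirrored in standalone Python. This is a behavioral sketch only: the Cirrus Starlark re module returns a list of match tuples rather than Python match objects, so the code below reproduces the intent (a "ci-os-only:" line in the commit message restricts which operating systems stay enabled; no tag keeps them all enabled) rather than porting the Starlark line for line.

import re

# Mirrors the operating_systems list in .cirrus.star; sanitycheck and
# compilerwarnings are treated as pseudo-OSes, as the comments above explain.
OPERATING_SYSTEMS = [
    'compilerwarnings', 'freebsd', 'linux', 'macos', 'mingw',
    'netbsd', 'openbsd', 'sanitycheck', 'windows',
]

def enabled_oses(commit_message):
    # A "ci-os-only: <list>" line restricts the run to the named OSes.
    m = re.search(r'^ci-os-only: ([^\n]+)$', commit_message, re.MULTILINE)
    if m is None:
        return OPERATING_SYSTEMS  # no tag: every task stays enabled
    return re.split(r'[, ]+', m.group(1).strip())

# Example: only the freebsd and linux tasks remain enabled.
print(enabled_oses('Fix a bug\n\nci-os-only: freebsd, linux'))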
Uses skip instead of only_if, as cirrus otherwise warns about # only_if conditions not matching. - skip: $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:.*' + skip: $CI_SANITYCHECK_ENABLED == false env: CPUS: 4 BUILD_JOBS: 8 TEST_JOBS: 8 - IMAGE_FAMILY: pg-ci-bookworm + IMAGE_FAMILY: pg-ci-trixie CCACHE_DIR: ${CIRRUS_WORKING_DIR}/ccache_dir # no options enabled, should be small CCACHE_MAXSIZE: "150M" @@ -104,6 +104,7 @@ task: configure_script: | su postgres <<-EOF + set -e meson setup \ --buildtype=debug \ --auto-features=disabled \ @@ -112,6 +113,7 @@ task: EOF build_script: | su postgres <<-EOF + set -e ninja -C build -j${BUILD_JOBS} ${MBUILD_TARGET} EOF upload_caches: ccache @@ -121,6 +123,7 @@ task: # tap test that exercises both a frontend binary and the backend. test_minimal_script: | su postgres <<-EOF + set -e ulimit -c unlimited meson test $MTEST_ARGS --suite setup meson test $MTEST_ARGS --num-processes ${TEST_JOBS} \ @@ -167,7 +170,7 @@ task: <<: *freebsd_task_template depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*freebsd.*' + only_if: $CI_FREEBSD_ENABLED sysinfo_script: | id @@ -195,6 +198,7 @@ task: # already takes longer than other platforms except for windows. configure_script: | su postgres <<-EOF + set -e meson setup \ --buildtype=debug \ -Dcassert=true -Dinjection_points=true \ @@ -207,6 +211,7 @@ task: test_world_script: | su postgres <<-EOF + set -e ulimit -c unlimited meson test $MTEST_ARGS --num-processes ${TEST_JOBS} EOF @@ -231,6 +236,7 @@ task: # during upload, as it doesn't expect artifacts to change size stop_running_script: | su postgres <<-EOF + set -e build/tmp_install/usr/local/pgsql/bin/pg_ctl -D build/runningcheck stop || true EOF <<: *on_failure_meson @@ -239,7 +245,6 @@ task: task: depends_on: SanityCheck - trigger_type: manual env: # Below are experimentally derived to be a decent choice. @@ -257,7 +262,9 @@ task: matrix: - name: NetBSD - Meson - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*netbsd.*' + # See REPO_CI_AUTOMATIC_TRIGGER_TASKS in .cirrus.star + trigger_type: $CI_TRIGGER_TYPE_NETBSD + only_if: $CI_NETBSD_ENABLED env: OS_NAME: netbsd IMAGE_FAMILY: pg-ci-netbsd-postgres @@ -274,13 +281,16 @@ task: <<: *netbsd_task_template - name: OpenBSD - Meson - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*openbsd.*' + # See REPO_CI_AUTOMATIC_TRIGGER_TASKS in .cirrus.star + trigger_type: $CI_TRIGGER_TYPE_OPENBSD + only_if: $CI_OPENBSD_ENABLED env: OS_NAME: openbsd IMAGE_FAMILY: pg-ci-openbsd-postgres PKGCONFIG_PATH: '/usr/lib/pkgconfig:/usr/local/lib/pkgconfig' UUID: -Duuid=e2fs TCL: -Dtcl_version=tcl86 + CORE_DUMP_EXECUTABLE_DIR: $CIRRUS_WORKING_DIR/build/tmp_install/usr/local/pgsql/bin setup_additional_packages_script: | #pkg_add -I ... # Always core dump to ${CORE_DUMP_DIR} @@ -313,6 +323,7 @@ task: # And other uuid options are not available on NetBSD. configure_script: | su postgres <<-EOF + set -e meson setup \ --buildtype=debugoptimized \ --pkg-config-path ${PKGCONFIG_PATH} \ @@ -327,6 +338,7 @@ task: test_world_script: | su postgres <<-EOF + set -e ulimit -c unlimited # Otherwise tests will fail on OpenBSD, due to inability to start enough # processes. @@ -341,7 +353,7 @@ task: # ${CORE_DUMP_DIR}, they may not obey this. So, move core files to the # ${CORE_DUMP_DIR} directory. 
find build/ -type f -name '*.core' -exec mv '{}' ${CORE_DUMP_DIR} \; - src/tools/ci/cores_backtrace.sh ${OS_NAME} ${CORE_DUMP_DIR} + src/tools/ci/cores_backtrace.sh ${OS_NAME} ${CORE_DUMP_DIR} ${CORE_DUMP_EXECUTABLE_DIR} # configure feature flags, shared between the task running the linux tests and @@ -376,7 +388,7 @@ task: CPUS: 4 BUILD_JOBS: 4 TEST_JOBS: 8 # experimentally derived to be a decent choice - IMAGE_FAMILY: pg-ci-bookworm + IMAGE_FAMILY: pg-ci-trixie CCACHE_DIR: /tmp/ccache_dir DEBUGINFOD_URLS: "https://debuginfod.debian.net" @@ -397,7 +409,7 @@ task: # print_stacktraces=1,verbosity=2, duh # detect_leaks=0: too many uninteresting leak errors in short-lived binaries UBSAN_OPTIONS: print_stacktrace=1:disable_coredump=0:abort_on_error=1:verbosity=2 - ASAN_OPTIONS: print_stacktrace=1:disable_coredump=0:abort_on_error=1:detect_leaks=0 + ASAN_OPTIONS: print_stacktrace=1:disable_coredump=0:abort_on_error=1:detect_leaks=0:detect_stack_use_after_return=0 # SANITIZER_FLAGS is set in the tasks below CFLAGS: -Og -ggdb -fno-sanitize-recover=all $SANITIZER_FLAGS @@ -405,8 +417,6 @@ task: LDFLAGS: $SANITIZER_FLAGS CC: ccache gcc CXX: ccache g++ - # GCC emits a warning for llvm-14, so switch to a newer one. - LLVM_CONFIG: llvm-config-16 LINUX_CONFIGURE_FEATURES: *LINUX_CONFIGURE_FEATURES LINUX_MESON_FEATURES: *LINUX_MESON_FEATURES @@ -414,7 +424,7 @@ task: <<: *linux_task_template depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*linux.*' + only_if: $CI_LINUX_ENABLED ccache_cache: folder: ${CCACHE_DIR} @@ -453,7 +463,7 @@ task: # - Uses address sanitizer, sanitizer failures are typically printed in # the server log # - Configures postgres with a small segment size - - name: Linux - Debian Bookworm - Autoconf + - name: Linux - Debian Trixie - Autoconf env: SANITIZER_FLAGS: -fsanitize=address @@ -467,6 +477,7 @@ task: # that. configure_script: | su postgres <<-EOF + set -e ./configure \ --enable-cassert --enable-injection-points --enable-debug \ --enable-tap-tests --enable-nls \ @@ -476,13 +487,14 @@ task: \ ${LINUX_CONFIGURE_FEATURES} \ \ - CLANG="ccache clang-16" + CLANG="ccache clang" EOF build_script: su postgres -c "make -s -j${BUILD_JOBS} world-bin" upload_caches: ccache test_world_script: | su postgres <<-EOF + set -e ulimit -c unlimited # default is 0 make -s ${CHECK} ${CHECKFLAGS} -j${TEST_JOBS} EOF @@ -495,7 +507,7 @@ task: # are typically printed in the server log # - Test both 64bit and 32 bit builds # - uses io_method=io_uring - - name: Linux - Debian Bookworm - Meson + - name: Linux - Debian Trixie - Meson env: CCACHE_MAXSIZE: "400M" # tests two different builds @@ -505,6 +517,7 @@ task: configure_script: | su postgres <<-EOF + set -e meson setup \ --buildtype=debug \ -Dcassert=true -Dinjection_points=true \ @@ -516,6 +529,7 @@ task: # locally. 
configure_32_script: | su postgres <<-EOF + set -e export CC='ccache gcc -m32' meson setup \ --buildtype=debug \ @@ -523,19 +537,21 @@ task: ${LINUX_MESON_FEATURES} \ -Dllvm=disabled \ --pkg-config-path /usr/lib/i386-linux-gnu/pkgconfig/ \ - -DPERL=perl5.36-i386-linux-gnu \ + -DPERL=perl5.40-i386-linux-gnu \ -Dlibnuma=disabled \ build-32 EOF build_script: | su postgres <<-EOF + set -e ninja -C build -j${BUILD_JOBS} ${MBUILD_TARGET} ninja -C build -t missingdeps EOF build_32_script: | su postgres <<-EOF + set -e ninja -C build-32 -j${BUILD_JOBS} ${MBUILD_TARGET} ninja -C build -t missingdeps EOF @@ -544,6 +560,7 @@ task: test_world_script: | su postgres <<-EOF + set -e ulimit -c unlimited meson test $MTEST_ARGS --num-processes ${TEST_JOBS} EOF @@ -556,6 +573,7 @@ task: # from C, prevent that with PYTHONCOERCECLOCALE. test_world_32_script: | su postgres <<-EOF + set -e ulimit -c unlimited PYTHONCOERCECLOCALE=0 LANG=C meson test $MTEST_ARGS -C build-32 --num-processes ${TEST_JOBS} EOF @@ -573,7 +591,7 @@ task: # SPECIAL: # - Enables --clone for pg_upgrade and pg_combinebackup task: - name: macOS - Sonoma - Meson + name: macOS - Sequoia - Meson env: CPUS: 4 # always get that much for cirrusci macOS instances @@ -582,7 +600,7 @@ task: # work OK. See # https://postgr.es/m/20220927040208.l3shfcidovpzqxfh%40awork3.anarazel.de TEST_JOBS: 8 - IMAGE: ghcr.io/cirruslabs/macos-runner:sonoma + IMAGE: ghcr.io/cirruslabs/macos-runner:sequoia CIRRUS_WORKING_DIR: ${HOME}/pgsql/ CCACHE_DIR: ${HOME}/ccache @@ -613,7 +631,7 @@ task: <<: *macos_task_template depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*(macos|darwin|osx).*' + only_if: $CI_MACOS_ENABLED sysinfo_script: | id @@ -701,7 +719,7 @@ WINDOWS_ENVIRONMENT_BASE: &WINDOWS_ENVIRONMENT_BASE task: - name: Windows - Server 2019, VS 2019 - Meson & ninja + name: Windows - Server 2022, VS 2019 - Meson & ninja << : *WINDOWS_ENVIRONMENT_BASE env: @@ -719,7 +737,7 @@ task: <<: *windows_task_template depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*windows.*' + only_if: $CI_WINDOWS_ENABLED setup_additional_packages_script: | REM choco install -y --no-progress ... @@ -730,10 +748,9 @@ task: echo 127.0.0.3 pg-loadbalancetest >> c:\Windows\System32\Drivers\etc\hosts type c:\Windows\System32\Drivers\etc\hosts - # Use /DEBUG:FASTLINK to avoid high memory usage during linking configure_script: | vcvarsall x64 - meson setup --backend ninja --buildtype debug -Dc_link_args=/DEBUG:FASTLINK -Dcassert=true -Dinjection_points=true -Db_pch=true -Dextra_lib_dirs=c:\openssl\1.1\lib -Dextra_include_dirs=c:\openssl\1.1\include -DTAR=%TAR% build + meson setup --backend ninja --buildtype debug -Dcassert=true -Dinjection_points=true -Db_pch=true -Dextra_lib_dirs=c:\openssl\1.1\lib -Dextra_include_dirs=c:\openssl\1.1\include -DTAR=%TAR% build build_script: | vcvarsall x64 @@ -753,15 +770,13 @@ task: task: << : *WINDOWS_ENVIRONMENT_BASE - name: Windows - Server 2019, MinGW64 - Meson - - # due to resource constraints we don't run this task by default for now - trigger_type: manual - # worth using only_if despite being manual, otherwise this task will show up - # when e.g. ci-os-only: linux is used. 
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' || $CIRRUS_CHANGE_MESSAGE =~ '.*\nci-os-only:[^\n]*mingw.*' - # otherwise it'll be sorted before other tasks + name: Windows - Server 2022, MinGW64 - Meson + + # See REPO_CI_AUTOMATIC_TRIGGER_TASKS in .cirrus.star. + trigger_type: $CI_TRIGGER_TYPE_MINGW + depends_on: SanityCheck + only_if: $CI_MINGW_ENABLED env: TEST_JOBS: 4 # higher concurrency causes occasional failures @@ -815,15 +830,14 @@ task: # To limit unnecessary work only run this once the SanityCheck # succeeds. This is particularly important for this task as we intentionally - # use always: to continue after failures. Task that did not run count as a - # success, so we need to recheck SanityChecks's condition here ... + # use always: to continue after failures. depends_on: SanityCheck - only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\nci-os-only:.*' + only_if: $CI_COMPILERWARNINGS_ENABLED env: CPUS: 4 BUILD_JOBS: 4 - IMAGE_FAMILY: pg-ci-bookworm + IMAGE_FAMILY: pg-ci-trixie # Use larger ccache cache, as this task compiles with multiple compilers / # flag combinations @@ -833,9 +847,6 @@ task: LINUX_CONFIGURE_FEATURES: *LINUX_CONFIGURE_FEATURES LINUX_MESON_FEATURES: *LINUX_MESON_FEATURES - # GCC emits a warning for llvm-14, so switch to a newer one. - LLVM_CONFIG: llvm-config-16 - <<: *linux_task_template sysinfo_script: | @@ -871,7 +882,7 @@ task: --cache gcc.cache \ --enable-dtrace \ ${LINUX_CONFIGURE_FEATURES} \ - CC="ccache gcc" CXX="ccache g++" CLANG="ccache clang-16" + CC="ccache gcc" CXX="ccache g++" CLANG="ccache clang" make -s -j${BUILD_JOBS} clean time make -s -j${BUILD_JOBS} world-bin @@ -882,7 +893,7 @@ task: --cache gcc.cache \ --enable-cassert \ ${LINUX_CONFIGURE_FEATURES} \ - CC="ccache gcc" CXX="ccache g++" CLANG="ccache clang-16" + CC="ccache gcc" CXX="ccache g++" CLANG="ccache clang" make -s -j${BUILD_JOBS} clean time make -s -j${BUILD_JOBS} world-bin @@ -892,7 +903,7 @@ task: time ./configure \ --cache clang.cache \ ${LINUX_CONFIGURE_FEATURES} \ - CC="ccache clang" CXX="ccache clang++-16" CLANG="ccache clang-16" + CC="ccache clang" CXX="ccache clang++" CLANG="ccache clang" make -s -j${BUILD_JOBS} clean time make -s -j${BUILD_JOBS} world-bin @@ -904,7 +915,7 @@ task: --enable-cassert \ --enable-dtrace \ ${LINUX_CONFIGURE_FEATURES} \ - CC="ccache clang" CXX="ccache clang++-16" CLANG="ccache clang-16" + CC="ccache clang" CXX="ccache clang++" CLANG="ccache clang" make -s -j${BUILD_JOBS} clean time make -s -j${BUILD_JOBS} world-bin @@ -912,11 +923,11 @@ task: always: mingw_cross_warning_script: | time ./configure \ - --host=x86_64-w64-mingw32 \ + --host=x86_64-w64-mingw32ucrt \ --enable-cassert \ --without-icu \ - CC="ccache x86_64-w64-mingw32-gcc" \ - CXX="ccache x86_64-w64-mingw32-g++" + CC="ccache x86_64-w64-mingw32ucrt-gcc" \ + CXX="ccache x86_64-w64-mingw32ucrt-g++" make -s -j${BUILD_JOBS} clean time make -s -j${BUILD_JOBS} world-bin @@ -928,7 +939,7 @@ task: docs_build_script: | time ./configure \ --cache gcc.cache \ - CC="ccache gcc" CXX="ccache g++" CLANG="ccache clang-16" + CC="ccache gcc" CXX="ccache g++" CLANG="ccache clang" make -s -j${BUILD_JOBS} clean time make -s -j${BUILD_JOBS} -C doc @@ -938,16 +949,13 @@ task: # - Don't use ccache, the files are uncacheable, polluting ccache's # cache # - Use -fmax-errors, as particularly cpluspluscheck can be very verbose - # - XXX have to disable ICU to avoid errors: - # https://postgr.es/m/20220323002024.f2g6tivduzrktgfa%40alap3.anarazel.de ### always: headers_headerscheck_script: | time ./configure \ 
${LINUX_CONFIGURE_FEATURES} \ - --without-icu \ --quiet \ - CC="gcc" CXX"=g++" CLANG="clang-16" + CC="gcc" CXX"=g++" CLANG="clang" make -s -j${BUILD_JOBS} clean time make -s headerscheck EXTRAFLAGS='-fmax-errors=10' headers_cpluspluscheck_script: | diff --git a/.cirrus.yml b/.cirrus.yml index 33c6e481d746a..3f75852e84ecb 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -10,12 +10,20 @@ # # 1) the contents of this file # -# 2) if defined, the contents of the file referenced by the, repository +# 2) computed environment variables +# +# Used to enable/disable tasks based on the execution environment. See +# .cirrus.star: compute_environment_vars() +# +# 3) if defined, the contents of the file referenced by the, repository # level, REPO_CI_CONFIG_GIT_URL variable (see # https://cirrus-ci.org/guide/programming-tasks/#fs for the accepted # format) # -# 3) .cirrus.tasks.yml +# This allows running tasks in a different execution environment than the +# default, e.g. to have sufficient resources for cfbot. +# +# 4) .cirrus.tasks.yml # # This composition is done by .cirrus.star diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 8048afd1a80fa..88aa34ab4b220 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -14,6 +14,15 @@ # # $ git log --pretty=format:"%H # %cd%n# %s" $PGINDENTGITHASH -1 --date=iso +2795f5a5428fd292996fd155f16a57b0db3df8f4 # 2025-10-21 09:56:26 -0500 +# Re-pgindent brin.c. + +17a5ca58eb119a33e81e57b72618236538932167 # 2025-09-13 14:50:02 -0500 +# Re-pgindent nbtpreprocesskeys.c after commit 796962922e. + +07448b3969d55a2081cdafafc23f68df3392f220 # 2025-07-01 15:24:19 +0200 +# Fix indentation in pg_numa code + b27644bade0348d0dafd3036c47880a349fe9332 # 2025-06-15 13:04:24 -0400 # Sync typedefs.list with the buildfarm. diff --git a/.gitattributes b/.gitattributes index 8df6b75e653b8..4e26bbfb14552 100644 --- a/.gitattributes +++ b/.gitattributes @@ -12,8 +12,8 @@ *.xsl whitespace=space-before-tab,trailing-space,tab-in-indent # Avoid confusing ASCII underlines with leftover merge conflict markers -README conflict-marker-size=32 -README.* conflict-marker-size=32 +README conflict-marker-size=48 +README.* conflict-marker-size=48 # Certain data files that contain special whitespace, and other special cases *.data -whitespace diff --git a/Makefile b/Makefile index 8a2ec9396b6b4..b363b2f24766b 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ all: all check install installdirs installcheck installcheck-parallel uninstall clean distclean maintainer-clean dist distcheck world check-world install-world installcheck-world: @if [ ! -f GNUmakefile ] ; then \ echo "You need to run the 'configure' program first. Please see"; \ - echo "" ; \ + echo "" ; \ false ; \ fi @IFS=':' ; \ diff --git a/README.md b/README.md index f6104c038b3d5..7352a90a72373 100644 --- a/README.md +++ b/README.md @@ -12,9 +12,9 @@ and functions. This distribution also contains C language bindings. Copyright and license information can be found in the file COPYRIGHT. General documentation about this version of PostgreSQL can be found at -. In particular, information +. In particular, information about building PostgreSQL from the source code can be found at -. +. The latest version of this software, and related software, may be obtained at . 
For more information look at our web site located at <https://www.postgresql.org/>. diff --git a/config/llvm.m4 b/config/llvm.m4 index fa4bedd9370fc..9d6fe8199e364 100644 --- a/config/llvm.m4 +++ b/config/llvm.m4 @@ -4,7 +4,7 @@ # ----------------- # # Look for the LLVM installation, check that it's new enough, set the -# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH} and LDFLAGS +# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH,LIBS} # variables. Also verify that CLANG is available, to transform C # into bitcode. # @@ -55,7 +55,7 @@ AC_DEFUN([PGAC_LLVM_SUPPORT], for pgac_option in `$LLVM_CONFIG --ldflags`; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LLVM_LIBS="$LLVM_LIBS $pgac_option";; esac done diff --git a/config/programs.m4 b/config/programs.m4 index 0ad1e58b48d6b..e57fe4907b844 100644 --- a/config/programs.m4 +++ b/config/programs.m4 @@ -284,20 +284,26 @@ AC_DEFUN([PGAC_CHECK_STRIP], AC_DEFUN([PGAC_CHECK_LIBCURL], [ + # libcurl compiler/linker flags are kept separate from the global flags, so + # they have to be added back temporarily for the following tests. + pgac_save_CPPFLAGS=$CPPFLAGS + pgac_save_LDFLAGS=$LDFLAGS + pgac_save_LIBS=$LIBS + + CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS" + LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS" + AC_CHECK_HEADER(curl/curl.h, [], [AC_MSG_ERROR([header file <curl/curl.h> is required for --with-libcurl])]) + + # LIBCURL_LDLIBS is determined here. Like the compiler flags, it should not + # pollute the global LIBS setting. AC_CHECK_LIB(curl, curl_multi_init, [ AC_DEFINE([HAVE_LIBCURL], [1], [Define to 1 if you have the `curl' library (-lcurl).]) AC_SUBST(LIBCURL_LDLIBS, -lcurl) ], [AC_MSG_ERROR([library 'curl' does not provide curl_multi_init])]) - pgac_save_CPPFLAGS=$CPPFLAGS - pgac_save_LDFLAGS=$LDFLAGS - pgac_save_LIBS=$LIBS - - CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS" - LDFLAGS="$LIBCURL_LDFLAGS $LDFLAGS" LIBS="$LIBCURL_LDLIBS $LIBS" # Check to see whether the current platform supports threadsafe Curl diff --git a/configure b/configure index 3d3d3db97a456..8bd17bdb88c54 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for PostgreSQL 18beta1. +# Generated by GNU Autoconf 2.69 for PostgreSQL 18.1. # # Report bugs to <pgsql-bugs@lists.postgresql.org>. # @@ -582,8 +582,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='PostgreSQL' PACKAGE_TARNAME='postgresql' -PACKAGE_VERSION='18beta1' -PACKAGE_STRING='PostgreSQL 18beta1' +PACKAGE_VERSION='18.1' +PACKAGE_STRING='PostgreSQL 18.1' PACKAGE_BUGREPORT='pgsql-bugs@lists.postgresql.org' PACKAGE_URL='https://www.postgresql.org/' @@ -1468,7 +1468,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures PostgreSQL 18beta1 to adapt to many kinds of systems. +\`configure' configures PostgreSQL 18.1 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1533,7 +1533,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of PostgreSQL 18beta1:";; + short | recursive ) echo "Configuration of PostgreSQL 18.1:";; esac cat <<\_ACEOF @@ -1724,7 +1724,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -PostgreSQL configure 18beta1 +PostgreSQL configure 18.1 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2477,7 +2477,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by PostgreSQL $as_me 18beta1, which was +It was created by PostgreSQL $as_me 18.1, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -5194,7 +5194,7 @@ fi for pgac_option in `$LLVM_CONFIG --ldflags`; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LLVM_LIBS="$LLVM_LIBS $pgac_option";; esac done @@ -9436,12 +9436,12 @@ fi # Note the user could also set XML2_CFLAGS/XML2_LIBS directly for pgac_option in $XML2_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $XML2_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -9666,12 +9666,12 @@ fi # note that -llz4 will be added by AC_CHECK_LIB below. for pgac_option in $LZ4_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $LZ4_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -9807,12 +9807,12 @@ fi # note that -lzstd will be added by AC_CHECK_LIB below. for pgac_option in $ZSTD_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $ZSTD_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -12717,6 +12717,15 @@ fi if test "$with_libcurl" = yes ; then + # libcurl compiler/linker flags are kept separate from the global flags, so + # they have to be added back temporarily for the following tests. + pgac_save_CPPFLAGS=$CPPFLAGS + pgac_save_LDFLAGS=$LDFLAGS + pgac_save_LIBS=$LIBS + + CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS" + LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS" + ac_fn_c_check_header_mongrel "$LINENO" "curl/curl.h" "ac_cv_header_curl_curl_h" "$ac_includes_default" if test "x$ac_cv_header_curl_curl_h" = xyes; then : @@ -12725,6 +12734,9 @@ else fi + + # LIBCURL_LDLIBS is determined here. Like the compiler flags, it should not + # pollute the global LIBS setting. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for curl_multi_init in -lcurl" >&5 $as_echo_n "checking for curl_multi_init in -lcurl... " >&6; } if ${ac_cv_lib_curl_curl_multi_init+:} false; then : @@ -12774,12 +12786,6 @@ else fi - pgac_save_CPPFLAGS=$CPPFLAGS - pgac_save_LDFLAGS=$LDFLAGS - pgac_save_LIBS=$LIBS - - CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS" - LDFLAGS="$LIBCURL_LDFLAGS $LDFLAGS" LIBS="$LIBCURL_LDLIBS $LIBS" # Check to see whether the current platform supports threadsafe Curl @@ -13309,6 +13315,23 @@ fi fi +if test "$with_liburing" = yes; then + _LIBS="$LIBS" + LIBS="$LIBURING_LIBS $LIBS" + for ac_func in io_uring_queue_init_mem +do : + ac_fn_c_check_func "$LINENO" "io_uring_queue_init_mem" "ac_cv_func_io_uring_queue_init_mem" +if test "x$ac_cv_func_io_uring_queue_init_mem" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_IO_URING_QUEUE_INIT_MEM 1 +_ACEOF + +fi +done + + LIBS="$_LIBS" +fi + if test "$with_lz4" = yes ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LZ4_compress_default in -llz4" >&5 $as_echo_n "checking for LZ4_compress_default in -llz4... 
" >&6; } @@ -16635,7 +16658,7 @@ fi if test "$with_icu" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$ICU_CFLAGS $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $ICU_CFLAGS" # Verify we have ICU's header files ac_fn_c_check_header_mongrel "$LINENO" "unicode/ucol.h" "ac_cv_header_unicode_ucol_h" "$ac_includes_default" @@ -17542,7 +17565,7 @@ $as_echo "#define HAVE_GCC__ATOMIC_INT64_CAS 1" >>confdefs.h fi -# Check for x86 cpuid instruction +# Check for __get_cpuid() and __cpuid() { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid" >&5 $as_echo_n "checking for __get_cpuid... " >&6; } if ${pgac_cv__get_cpuid+:} false; then : @@ -17575,77 +17598,79 @@ if test x"$pgac_cv__get_cpuid" = x"yes"; then $as_echo "#define HAVE__GET_CPUID 1" >>confdefs.h -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid_count" >&5 -$as_echo_n "checking for __get_cpuid_count... " >&6; } -if ${pgac_cv__get_cpuid_count+:} false; then : +else + # __cpuid() + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuid" >&5 +$as_echo_n "checking for __cpuid... " >&6; } +if ${pgac_cv__cpuid+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include +#include int main () { unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuid_count(7, 0, &exx[0], &exx[1], &exx[2], &exx[3]); + __cpuid(exx, 1); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - pgac_cv__get_cpuid_count="yes" + pgac_cv__cpuid="yes" else - pgac_cv__get_cpuid_count="no" + pgac_cv__cpuid="no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__get_cpuid_count" >&5 -$as_echo "$pgac_cv__get_cpuid_count" >&6; } -if test x"$pgac_cv__get_cpuid_count" = x"yes"; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuid" >&5 +$as_echo "$pgac_cv__cpuid" >&6; } + if test x"$pgac_cv__cpuid" = x"yes"; then -$as_echo "#define HAVE__GET_CPUID_COUNT 1" >>confdefs.h +$as_echo "#define HAVE__CPUID 1" >>confdefs.h + fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuid" >&5 -$as_echo_n "checking for __cpuid... " >&6; } -if ${pgac_cv__cpuid+:} false; then : +# Check for __get_cpuid_count() and __cpuidex() in a similar fashion. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid_count" >&5 +$as_echo_n "checking for __get_cpuid_count... " >&6; } +if ${pgac_cv__get_cpuid_count+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ -#include <intrin.h> +#include <cpuid.h> int main () { unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuid(exx[0], 1); + __get_cpuid_count(7, 0, &exx[0], &exx[1], &exx[2], &exx[3]); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - pgac_cv__cpuid="yes" + pgac_cv__get_cpuid_count="yes" else - pgac_cv__cpuid="no" + pgac_cv__get_cpuid_count="no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuid" >&5 -$as_echo "$pgac_cv__cpuid" >&6; } -if test x"$pgac_cv__cpuid" = x"yes"; then - -$as_echo "#define HAVE__CPUID 1" >>confdefs.h +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__get_cpuid_count" >&5 +$as_echo "$pgac_cv__get_cpuid_count" >&6; } +if test x"$pgac_cv__get_cpuid_count" = x"yes"; then -fi +$as_echo "#define HAVE__GET_CPUID_COUNT 1" >>confdefs.h -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuidex" >&5 +else + # __cpuidex() + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuidex" >&5 $as_echo_n "checking for __cpuidex... " >&6; } if ${pgac_cv__cpuidex+:} false; then : $as_echo_n "(cached) " >&6 @@ -17657,7 +17682,7 @@ int main () { unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuidex(exx[0], 7, 0); + __cpuidex(exx, 7, 0); ; return 0; @@ -17673,10 +17698,11 @@ rm -f core conftest.err conftest.$ac_objext \ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuidex" >&5 $as_echo "$pgac_cv__cpuidex" >&6; } -if test x"$pgac_cv__cpuidex" = x"yes"; then + if test x"$pgac_cv__cpuidex" = x"yes"; then $as_echo "#define HAVE__CPUIDEX 1" >>confdefs.h + fi fi # Check for XSAVE intrinsics @@ -18853,7 +18879,7 @@ Use --without-tcl to disable building PL/Tcl." "$LINENO" 5 fi # now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h> ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $TCL_INCLUDE_SPEC" ac_fn_c_check_header_mongrel "$LINENO" "tcl.h" "ac_cv_header_tcl_h" "$ac_includes_default" if test "x$ac_cv_header_tcl_h" = xyes; then : @@ -18922,7 +18948,7 @@ fi # check for <Python.h> if test "$with_python" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$python_includespec $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $python_includespec" ac_fn_c_check_header_mongrel "$LINENO" "Python.h" "ac_cv_header_Python_h" "$ac_includes_default" if test "x$ac_cv_header_Python_h" = xyes; then : @@ -20063,7 +20089,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by PostgreSQL $as_me 18beta1, which was +This file was extended by PostgreSQL $as_me 18.1, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -20134,7 +20160,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -PostgreSQL config.status 18beta1 +PostgreSQL config.status 18.1 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 4b8335dc6138e..eb79f9b2e6d3d 100644 --- a/configure.ac +++ b/configure.ac @@ -17,7 +17,7 @@ dnl Read the Autoconf manual for details.
dnl m4_pattern_forbid(^PGAC_)dnl to catch undefined macros -AC_INIT([PostgreSQL], [18beta1], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/]) +AC_INIT([PostgreSQL], [18.1], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/]) m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required. Untested combinations of 'autoconf' and PostgreSQL versions are not @@ -1103,12 +1103,12 @@ if test "$with_libxml" = yes ; then # Note the user could also set XML2_CFLAGS/XML2_LIBS directly for pgac_option in $XML2_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $XML2_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -1152,12 +1152,12 @@ if test "$with_lz4" = yes; then # note that -llz4 will be added by AC_CHECK_LIB below. for pgac_option in $LZ4_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $LZ4_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -1177,12 +1177,12 @@ if test "$with_zstd" = yes; then # note that -lzstd will be added by AC_CHECK_LIB below. for pgac_option in $ZSTD_CFLAGS; do case $pgac_option in - -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";; + -I*|-D*) INCLUDES="$INCLUDES $pgac_option";; esac done for pgac_option in $ZSTD_LIBS; do case $pgac_option in - -L*) LDFLAGS="$LDFLAGS $pgac_option";; + -L*) LIBDIRS="$LIBDIRS $pgac_option";; esac done fi @@ -1420,6 +1420,13 @@ if test "$with_libxslt" = yes ; then AC_CHECK_LIB(xslt, xsltCleanupGlobals, [], [AC_MSG_ERROR([library 'xslt' is required for XSLT support])]) fi +if test "$with_liburing" = yes; then + _LIBS="$LIBS" + LIBS="$LIBURING_LIBS $LIBS" + AC_CHECK_FUNCS([io_uring_queue_init_mem]) + LIBS="$_LIBS" +fi + if test "$with_lz4" = yes ; then AC_CHECK_LIB(lz4, LZ4_compress_default, [], [AC_MSG_ERROR([library 'lz4' is required for LZ4 support])]) fi @@ -1937,7 +1944,7 @@ if test "$with_icu" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$ICU_CFLAGS $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $ICU_CFLAGS" # Verify we have ICU's header files AC_CHECK_HEADER(unicode/ucol.h, [], @@ -2037,7 +2044,7 @@ PGAC_HAVE_GCC__ATOMIC_INT32_CAS PGAC_HAVE_GCC__ATOMIC_INT64_CAS -# Check for x86 cpuid instruction +# Check for __get_cpuid() and __cpuid() AC_CACHE_CHECK([for __get_cpuid], [pgac_cv__get_cpuid], [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <cpuid.h>], [[unsigned int exx[4] = {0, 0, 0, 0}; @@ -2047,8 +2054,21 @@ AC_CACHE_CHECK([for __get_cpuid], [pgac_cv__get_cpuid], [pgac_cv__get_cpuid="no"])]) if test x"$pgac_cv__get_cpuid" = x"yes"; then AC_DEFINE(HAVE__GET_CPUID, 1, [Define to 1 if you have __get_cpuid.]) +else + # __cpuid() + AC_CACHE_CHECK([for __cpuid], [pgac_cv__cpuid], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>], + [[unsigned int exx[4] = {0, 0, 0, 0}; + __cpuid(exx, 1); + ]])], + [pgac_cv__cpuid="yes"], + [pgac_cv__cpuid="no"])]) + if test x"$pgac_cv__cpuid" = x"yes"; then + AC_DEFINE(HAVE__CPUID, 1, [Define to 1 if you have __cpuid.]) + fi fi +# Check for __get_cpuid_count() and __cpuidex() in a similar fashion.
AC_CACHE_CHECK([for __get_cpuid_count], [pgac_cv__get_cpuid_count], [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <cpuid.h>], [[unsigned int exx[4] = {0, 0, 0, 0}; @@ -2058,28 +2078,18 @@ AC_CACHE_CHECK([for __get_cpuid_count], [pgac_cv__get_cpuid_count], [pgac_cv__get_cpuid_count="no"])]) if test x"$pgac_cv__get_cpuid_count" = x"yes"; then AC_DEFINE(HAVE__GET_CPUID_COUNT, 1, [Define to 1 if you have __get_cpuid_count.]) -fi - -AC_CACHE_CHECK([for __cpuid], [pgac_cv__cpuid], -[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>], - [[unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuid(exx[0], 1); - ]])], - [pgac_cv__cpuid="yes"], - [pgac_cv__cpuid="no"])]) -if test x"$pgac_cv__cpuid" = x"yes"; then - AC_DEFINE(HAVE__CPUID, 1, [Define to 1 if you have __cpuid.]) -fi - -AC_CACHE_CHECK([for __cpuidex], [pgac_cv__cpuidex], -[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>], - [[unsigned int exx[4] = {0, 0, 0, 0}; - __get_cpuidex(exx[0], 7, 0); - ]])], - [pgac_cv__cpuidex="yes"], - [pgac_cv__cpuidex="no"])]) -if test x"$pgac_cv__cpuidex" = x"yes"; then - AC_DEFINE(HAVE__CPUIDEX, 1, [Define to 1 if you have __cpuidex.]) +else + # __cpuidex() + AC_CACHE_CHECK([for __cpuidex], [pgac_cv__cpuidex], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>], + [[unsigned int exx[4] = {0, 0, 0, 0}; + __cpuidex(exx, 7, 0); + ]])], + [pgac_cv__cpuidex="yes"], + [pgac_cv__cpuidex="no"])]) + if test x"$pgac_cv__cpuidex" = x"yes"; then + AC_DEFINE(HAVE__CPUIDEX, 1, [Define to 1 if you have __cpuidex.]) + fi fi # Check for XSAVE intrinsics @@ -2337,7 +2347,7 @@ Use --without-tcl to disable building PL/Tcl.]) fi # now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h> ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $TCL_INCLUDE_SPEC" AC_CHECK_HEADER(tcl.h, [], [AC_MSG_ERROR([header file <tcl.h> is required for Tcl])]) CPPFLAGS=$ac_save_CPPFLAGS fi @@ -2374,7 +2384,7 @@ fi # check for <Python.h> if test "$with_python" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$python_includespec $CPPFLAGS" + CPPFLAGS="$CPPFLAGS $python_includespec" AC_CHECK_HEADER(Python.h, [], [AC_MSG_ERROR([header file <Python.h> is required for Python])]) CPPFLAGS=$ac_save_CPPFLAGS fi diff --git a/contrib/amcheck/t/002_cic.pl b/contrib/amcheck/t/002_cic.pl index 6a0c4f611258f..f4a24936b2c01 100644 --- a/contrib/amcheck/t/002_cic.pl +++ b/contrib/amcheck/t/002_cic.pl @@ -64,5 +64,28 @@ ) }); +# Test bt_index_parent_check() with indexes created with +# CREATE INDEX CONCURRENTLY.
+$node->safe_psql('postgres', q(CREATE TABLE quebec(i int primary key))); +# Insert two rows into index +$node->safe_psql('postgres', + q(INSERT INTO quebec SELECT i FROM generate_series(1, 2) s(i);)); + +# start background transaction +my $in_progress_h = $node->background_psql('postgres'); +$in_progress_h->query_safe(q(BEGIN; SELECT pg_current_xact_id();)); + +# delete one row from table, while background transaction is in progress +$node->safe_psql('postgres', q(DELETE FROM quebec WHERE i = 1;)); +# create index concurrently, which will skip the deleted row +$node->safe_psql('postgres', q(CREATE INDEX CONCURRENTLY oscar ON quebec(i);)); + +# check index using bt_index_parent_check +my $result = $node->psql('postgres', + q(SELECT bt_index_parent_check('oscar', heapallindexed => true))); +is($result, '0', 'bt_index_parent_check for CIC after removed row'); + +$in_progress_h->quit; + $node->stop; done_testing(); diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index f11c43a0ed797..7898c05c045fd 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -382,7 +382,6 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace, BTMetaPageData *metad; uint32 previouslevel; BtreeLevel current; - Snapshot snapshot = SnapshotAny; if (!readonly) elog(DEBUG1, "verifying consistency of tree structure for index \"%s\"", @@ -433,54 +432,46 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace, state->heaptuplespresent = 0; /* - * Register our own snapshot in !readonly case, rather than asking + * Register our own snapshot for heapallindexed, rather than asking * table_index_build_scan() to do this for us later. This needs to * happen before index fingerprinting begins, so we can later be * certain that index fingerprinting should have reached all tuples * returned by table_index_build_scan(). */ - if (!state->readonly) - { - snapshot = RegisterSnapshot(GetTransactionSnapshot()); + state->snapshot = RegisterSnapshot(GetTransactionSnapshot()); - /* - * GetTransactionSnapshot() always acquires a new MVCC snapshot in - * READ COMMITTED mode. A new snapshot is guaranteed to have all - * the entries it requires in the index. - * - * We must defend against the possibility that an old xact - * snapshot was returned at higher isolation levels when that - * snapshot is not safe for index scans of the target index. This - * is possible when the snapshot sees tuples that are before the - * index's indcheckxmin horizon. Throwing an error here should be - * very rare. It doesn't seem worth using a secondary snapshot to - * avoid this. - */ - if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin && - !TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data), - snapshot->xmin)) - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("index \"%s\" cannot be verified using transaction snapshot", - RelationGetRelationName(rel)))); - } + /* + * GetTransactionSnapshot() always acquires a new MVCC snapshot in + * READ COMMITTED mode. A new snapshot is guaranteed to have all the + * entries it requires in the index. + * + * We must defend against the possibility that an old xact snapshot + * was returned at higher isolation levels when that snapshot is not + * safe for index scans of the target index. This is possible when + * the snapshot sees tuples that are before the index's indcheckxmin + * horizon. Throwing an error here should be very rare. 
It doesn't + * seem worth using a secondary snapshot to avoid this. + */ + if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin && + !TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data), + state->snapshot->xmin)) + ereport(ERROR, + errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("index \"%s\" cannot be verified using transaction snapshot", + RelationGetRelationName(rel))); } /* - * We need a snapshot to check the uniqueness of the index. For better - * performance take it once per index check. If snapshot already taken - * reuse it. + * We need a snapshot to check the uniqueness of the index. For better + * performance, take it once per index check. If one was already taken + * above, use that. */ if (state->checkunique) { state->indexinfo = BuildIndexInfo(state->rel); - if (state->indexinfo->ii_Unique) - { - if (snapshot != SnapshotAny) - state->snapshot = snapshot; - else - state->snapshot = RegisterSnapshot(GetTransactionSnapshot()); - } + + if (state->indexinfo->ii_Unique && state->snapshot == InvalidSnapshot) + state->snapshot = RegisterSnapshot(GetTransactionSnapshot()); } Assert(!state->rootdescend || state->readonly); @@ -555,13 +546,12 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace, /* * Create our own scan for table_index_build_scan(), rather than * getting it to do so for us. This is required so that we can - * actually use the MVCC snapshot registered earlier in !readonly - * case. + * actually use the MVCC snapshot registered earlier. * * Note that table_index_build_scan() calls heap_endscan() for us. */ scan = table_beginscan_strat(state->heaprel, /* relation */ - snapshot, /* snapshot */ + state->snapshot, /* snapshot */ 0, /* number of keys */ NULL, /* scan key */ true, /* buffer access strategy OK */ @@ -569,16 +559,15 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace, /* * Scan will behave as the first scan of a CREATE INDEX CONCURRENTLY - * behaves in !readonly case. + * behaves. * * It's okay that we don't actually use the same lock strength for the - * heap relation as any other ii_Concurrent caller would in !readonly - * case. We have no reason to care about a concurrent VACUUM - * operation, since there isn't going to be a second scan of the heap - * that needs to be sure that there was no concurrent recycling of - * TIDs. + * heap relation as any other ii_Concurrent caller would. We have no + * reason to care about a concurrent VACUUM operation, since there + * isn't going to be a second scan of the heap that needs to be sure + * that there was no concurrent recycling of TIDs. 
*/ - indexinfo->ii_Concurrent = !state->readonly; + indexinfo->ii_Concurrent = true; /* * Don't wait for uncommitted tuple xact commit/abort when index is a @@ -602,14 +591,11 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace, state->heaptuplespresent, RelationGetRelationName(heaprel), 100.0 * bloom_prop_bits_set(state->filter)))); - if (snapshot != SnapshotAny) - UnregisterSnapshot(snapshot); - bloom_free(state->filter); } /* Be tidy: */ - if (snapshot == SnapshotAny && state->snapshot != InvalidSnapshot) + if (state->snapshot != InvalidSnapshot) UnregisterSnapshot(state->snapshot); MemoryContextDelete(state->targetcontext); } @@ -721,7 +707,7 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) errmsg("block %u is not leftmost in index \"%s\"", current, RelationGetRelationName(state->rel)))); - if (level.istruerootlevel && !P_ISROOT(opaque)) + if (level.istruerootlevel && (!P_ISROOT(opaque) && !P_INCOMPLETE_SPLIT(opaque))) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("block %u is not true root in index \"%s\"", @@ -2270,7 +2256,7 @@ bt_child_highkey_check(BtreeCheckState *state, * If we visit page with high key, check that it is equal to the * target key next to corresponding downlink. */ - if (!rightsplit && !P_RIGHTMOST(opaque)) + if (!rightsplit && !P_RIGHTMOST(opaque) && !P_ISHALFDEAD(opaque)) { BTPageOpaque topaque; IndexTuple highkey; diff --git a/contrib/auto_explain/Makefile b/contrib/auto_explain/Makefile index efd127d3cae64..94ab28e7c06b9 100644 --- a/contrib/auto_explain/Makefile +++ b/contrib/auto_explain/Makefile @@ -6,6 +6,8 @@ OBJS = \ auto_explain.o PGFILEDESC = "auto_explain - logging facility for execution plans" +REGRESS = alter_reset + TAP_TESTS = 1 ifdef USE_PGXS diff --git a/contrib/auto_explain/expected/alter_reset.out b/contrib/auto_explain/expected/alter_reset.out new file mode 100644 index 0000000000000..ec355189806ae --- /dev/null +++ b/contrib/auto_explain/expected/alter_reset.out @@ -0,0 +1,19 @@ +-- +-- This tests resetting unknown custom GUCs with reserved prefixes. There's +-- nothing specific to auto_explain; this is just a convenient place to put +-- this test. +-- +SELECT current_database() AS datname \gset +CREATE ROLE regress_ae_role; +ALTER DATABASE :"datname" SET auto_explain.bogus = 1; +ALTER ROLE regress_ae_role SET auto_explain.bogus = 1; +ALTER ROLE regress_ae_role IN DATABASE :"datname" SET auto_explain.bogus = 1; +ALTER SYSTEM SET auto_explain.bogus = 1; +LOAD 'auto_explain'; +WARNING: invalid configuration parameter name "auto_explain.bogus", removing it +DETAIL: "auto_explain" is now a reserved prefix. 
+ALTER DATABASE :"datname" RESET auto_explain.bogus; +ALTER ROLE regress_ae_role RESET auto_explain.bogus; +ALTER ROLE regress_ae_role IN DATABASE :"datname" RESET auto_explain.bogus; +ALTER SYSTEM RESET auto_explain.bogus; +DROP ROLE regress_ae_role; diff --git a/contrib/auto_explain/meson.build b/contrib/auto_explain/meson.build index 92dc9df6f7cac..a9b45cc235f12 100644 --- a/contrib/auto_explain/meson.build +++ b/contrib/auto_explain/meson.build @@ -20,6 +20,11 @@ tests += { 'name': 'auto_explain', 'sd': meson.current_source_dir(), 'bd': meson.current_build_dir(), + 'regress': { + 'sql': [ + 'alter_reset', + ], + }, 'tap': { 'tests': [ 't/001_auto_explain.pl', diff --git a/contrib/auto_explain/sql/alter_reset.sql b/contrib/auto_explain/sql/alter_reset.sql new file mode 100644 index 0000000000000..bf621454ec24a --- /dev/null +++ b/contrib/auto_explain/sql/alter_reset.sql @@ -0,0 +1,22 @@ +-- +-- This tests resetting unknown custom GUCs with reserved prefixes. There's +-- nothing specific to auto_explain; this is just a convenient place to put +-- this test. +-- + +SELECT current_database() AS datname \gset +CREATE ROLE regress_ae_role; + +ALTER DATABASE :"datname" SET auto_explain.bogus = 1; +ALTER ROLE regress_ae_role SET auto_explain.bogus = 1; +ALTER ROLE regress_ae_role IN DATABASE :"datname" SET auto_explain.bogus = 1; +ALTER SYSTEM SET auto_explain.bogus = 1; + +LOAD 'auto_explain'; + +ALTER DATABASE :"datname" RESET auto_explain.bogus; +ALTER ROLE regress_ae_role RESET auto_explain.bogus; +ALTER ROLE regress_ae_role IN DATABASE :"datname" RESET auto_explain.bogus; +ALTER SYSTEM RESET auto_explain.bogus; + +DROP ROLE regress_ae_role; diff --git a/contrib/btree_gist/Makefile b/contrib/btree_gist/Makefile index 68190ac5e4687..7ac2df26c1044 100644 --- a/contrib/btree_gist/Makefile +++ b/contrib/btree_gist/Makefile @@ -34,7 +34,7 @@ DATA = btree_gist--1.0--1.1.sql \ btree_gist--1.1--1.2.sql btree_gist--1.2.sql btree_gist--1.2--1.3.sql \ btree_gist--1.3--1.4.sql btree_gist--1.4--1.5.sql \ btree_gist--1.5--1.6.sql btree_gist--1.6--1.7.sql \ - btree_gist--1.7--1.8.sql btree_gist--1.8--1.9.sql + btree_gist--1.7--1.8.sql PGFILEDESC = "btree_gist - B-tree equivalent GiST operator classes" REGRESS = init int2 int4 int8 float4 float8 cash oid timestamp timestamptz \ diff --git a/contrib/btree_gist/btree_gist--1.7--1.8.sql b/contrib/btree_gist/btree_gist--1.7--1.8.sql index 8f79365a461f8..22316dc3f566c 100644 --- a/contrib/btree_gist/btree_gist--1.7--1.8.sql +++ b/contrib/btree_gist/btree_gist--1.7--1.8.sql @@ -3,6 +3,203 @@ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "ALTER EXTENSION btree_gist UPDATE TO '1.8'" to load this file. 
\quit +-- Add sortsupport functions + +CREATE FUNCTION gbt_bit_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_varbit_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_bool_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_bytea_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_cash_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_date_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_enum_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_float4_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_float8_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_inet_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_int2_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_int4_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_int8_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_intv_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_macaddr_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_macad8_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_numeric_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_oid_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_text_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_bpchar_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_time_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_ts_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +CREATE FUNCTION gbt_uuid_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; + +ALTER OPERATOR FAMILY gist_bit_ops USING gist ADD + FUNCTION 11 (bit, bit) gbt_bit_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_vbit_ops USING gist ADD + FUNCTION 11 (varbit, varbit) gbt_varbit_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_bool_ops USING gist ADD + FUNCTION 11 (bool, bool) gbt_bool_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_bytea_ops USING gist ADD + FUNCTION 11 (bytea, bytea) gbt_bytea_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_cash_ops USING gist 
ADD + FUNCTION 11 (money, money) gbt_cash_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_date_ops USING gist ADD + FUNCTION 11 (date, date) gbt_date_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_enum_ops USING gist ADD + FUNCTION 11 (anyenum, anyenum) gbt_enum_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_float4_ops USING gist ADD + FUNCTION 11 (float4, float4) gbt_float4_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_float8_ops USING gist ADD + FUNCTION 11 (float8, float8) gbt_float8_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_inet_ops USING gist ADD + FUNCTION 11 (inet, inet) gbt_inet_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_cidr_ops USING gist ADD + FUNCTION 11 (cidr, cidr) gbt_inet_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_int2_ops USING gist ADD + FUNCTION 11 (int2, int2) gbt_int2_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_int4_ops USING gist ADD + FUNCTION 11 (int4, int4) gbt_int4_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_int8_ops USING gist ADD + FUNCTION 11 (int8, int8) gbt_int8_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_interval_ops USING gist ADD + FUNCTION 11 (interval, interval) gbt_intv_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_macaddr_ops USING gist ADD + FUNCTION 11 (macaddr, macaddr) gbt_macaddr_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_macaddr8_ops USING gist ADD + FUNCTION 11 (macaddr8, macaddr8) gbt_macad8_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_numeric_ops USING gist ADD + FUNCTION 11 (numeric, numeric) gbt_numeric_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_oid_ops USING gist ADD + FUNCTION 11 (oid, oid) gbt_oid_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_text_ops USING gist ADD + FUNCTION 11 (text, text) gbt_text_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_bpchar_ops USING gist ADD + FUNCTION 11 (bpchar, bpchar) gbt_bpchar_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_time_ops USING gist ADD + FUNCTION 11 (time, time) gbt_time_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_timetz_ops USING gist ADD + FUNCTION 11 (timetz, timetz) gbt_time_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_timestamp_ops USING gist ADD + FUNCTION 11 (timestamp, timestamp) gbt_ts_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_timestamptz_ops USING gist ADD + FUNCTION 11 (timestamptz, timestamptz) gbt_ts_sortsupport (internal) ; + +ALTER OPERATOR FAMILY gist_uuid_ops USING gist ADD + FUNCTION 11 (uuid, uuid) gbt_uuid_sortsupport (internal) ; + +-- Add translate_cmptype functions + CREATE FUNCTION gist_translate_cmptype_btree(int) RETURNS smallint AS 'MODULE_PATHNAME' diff --git a/contrib/btree_gist/btree_gist--1.8--1.9.sql b/contrib/btree_gist/btree_gist--1.8--1.9.sql deleted file mode 100644 index 4b38749bf5f34..0000000000000 --- a/contrib/btree_gist/btree_gist--1.8--1.9.sql +++ /dev/null @@ -1,197 +0,0 @@ -/* contrib/btree_gist/btree_gist--1.7--1.8.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "ALTER EXTENSION btree_gist UPDATE TO '1.9'" to load this file. 
\quit - -CREATE FUNCTION gbt_bit_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_varbit_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_bool_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_bytea_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_cash_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_date_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_enum_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_float4_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_float8_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_inet_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_int2_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_int4_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_int8_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_intv_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_macaddr_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_macad8_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_numeric_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_oid_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_text_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_bpchar_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_time_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_ts_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -CREATE FUNCTION gbt_uuid_sortsupport(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT; - -ALTER OPERATOR FAMILY gist_bit_ops USING gist ADD - FUNCTION 11 (bit, bit) gbt_bit_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_vbit_ops USING gist ADD - FUNCTION 11 (varbit, varbit) gbt_varbit_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_bool_ops USING gist ADD - FUNCTION 11 (bool, bool) gbt_bool_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_bytea_ops USING gist ADD - FUNCTION 11 (bytea, bytea) gbt_bytea_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_cash_ops USING gist ADD - FUNCTION 11 (money, 
money) gbt_cash_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_date_ops USING gist ADD - FUNCTION 11 (date, date) gbt_date_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_enum_ops USING gist ADD - FUNCTION 11 (anyenum, anyenum) gbt_enum_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_float4_ops USING gist ADD - FUNCTION 11 (float4, float4) gbt_float4_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_float8_ops USING gist ADD - FUNCTION 11 (float8, float8) gbt_float8_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_inet_ops USING gist ADD - FUNCTION 11 (inet, inet) gbt_inet_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_cidr_ops USING gist ADD - FUNCTION 11 (cidr, cidr) gbt_inet_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_int2_ops USING gist ADD - FUNCTION 11 (int2, int2) gbt_int2_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_int4_ops USING gist ADD - FUNCTION 11 (int4, int4) gbt_int4_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_int8_ops USING gist ADD - FUNCTION 11 (int8, int8) gbt_int8_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_interval_ops USING gist ADD - FUNCTION 11 (interval, interval) gbt_intv_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_macaddr_ops USING gist ADD - FUNCTION 11 (macaddr, macaddr) gbt_macaddr_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_macaddr8_ops USING gist ADD - FUNCTION 11 (macaddr8, macaddr8) gbt_macad8_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_numeric_ops USING gist ADD - FUNCTION 11 (numeric, numeric) gbt_numeric_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_oid_ops USING gist ADD - FUNCTION 11 (oid, oid) gbt_oid_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_text_ops USING gist ADD - FUNCTION 11 (text, text) gbt_text_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_bpchar_ops USING gist ADD - FUNCTION 11 (bpchar, bpchar) gbt_bpchar_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_time_ops USING gist ADD - FUNCTION 11 (time, time) gbt_time_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_timetz_ops USING gist ADD - FUNCTION 11 (timetz, timetz) gbt_time_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_timestamp_ops USING gist ADD - FUNCTION 11 (timestamp, timestamp) gbt_ts_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_timestamptz_ops USING gist ADD - FUNCTION 11 (timestamptz, timestamptz) gbt_ts_sortsupport (internal) ; - -ALTER OPERATOR FAMILY gist_uuid_ops USING gist ADD - FUNCTION 11 (uuid, uuid) gbt_uuid_sortsupport (internal) ; diff --git a/contrib/btree_gist/btree_gist.control b/contrib/btree_gist/btree_gist.control index 69d9341a0adea..abf66538f3244 100644 --- a/contrib/btree_gist/btree_gist.control +++ b/contrib/btree_gist/btree_gist.control @@ -1,6 +1,6 @@ # btree_gist extension comment = 'support for indexing common datatypes in GiST' -default_version = '1.9' +default_version = '1.8' module_pathname = '$libdir/btree_gist' relocatable = true trusted = true diff --git a/contrib/btree_gist/meson.build b/contrib/btree_gist/meson.build index 89932dd3844ee..f4fa9574f1fd7 100644 --- a/contrib/btree_gist/meson.build +++ b/contrib/btree_gist/meson.build @@ -51,7 +51,6 @@ install_data( 'btree_gist--1.5--1.6.sql', 'btree_gist--1.6--1.7.sql', 'btree_gist--1.7--1.8.sql', - 'btree_gist--1.8--1.9.sql', kwargs: contrib_data_args, ) diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 8a0b112a7ff29..ec0c832e9214f 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -2665,7 
+2665,7 @@ dblink_connstr_has_required_scram_options(const char *connstr) PQconninfoFree(options); } - has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort->has_scram_keys; + has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort != NULL && MyProcPort->has_scram_keys; return (has_scram_keys && has_require_auth); } @@ -2698,7 +2698,7 @@ dblink_security_check(PGconn *conn, const char *connname, const char *connstr) * only added if UseScramPassthrough is set, and the user is not allowed * to add the SCRAM keys on fdw and user mapping options. */ - if (MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr)) return; #ifdef ENABLE_GSS @@ -2771,7 +2771,7 @@ dblink_connstr_check(const char *connstr) if (dblink_connstr_has_pw(connstr)) return; - if (MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr)) return; #ifdef ENABLE_GSS @@ -2931,7 +2931,7 @@ get_connect_string(const char *servername) * the user overwrites these options we can ereport on * dblink_connstr_check and dblink_security_check. */ - if (MyProcPort->has_scram_keys && UseScramPassthrough(foreign_server, user_mapping)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && UseScramPassthrough(foreign_server, user_mapping)) appendSCRAMKeysInfo(&buf); foreach(cell, fdw->options) diff --git a/contrib/intarray/_int_selfuncs.c b/contrib/intarray/_int_selfuncs.c index 6c3b7ace146aa..9bf6448624254 100644 --- a/contrib/intarray/_int_selfuncs.c +++ b/contrib/intarray/_int_selfuncs.c @@ -177,7 +177,7 @@ _int_matchsel(PG_FUNCTION_ARGS) if (query->size == 0) { ReleaseVariableStats(vardata); - return (Selectivity) 0.0; + PG_RETURN_FLOAT8(0.0); } /* diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c index 4b007f6e1b06a..5a3d78d03d5b4 100644 --- a/contrib/pg_buffercache/pg_buffercache_pages.c +++ b/contrib/pg_buffercache/pg_buffercache_pages.c @@ -194,6 +194,8 @@ pg_buffercache_pages(PG_FUNCTION_ARGS) BufferDesc *bufHdr; uint32 buf_state; + CHECK_FOR_INTERRUPTS(); + bufHdr = GetBufferDescriptor(i); /* Lock each buffer header before inspecting. */ buf_state = LockBufHdr(bufHdr); @@ -320,7 +322,6 @@ pg_buffercache_numa_pages(PG_FUNCTION_ARGS) uint64 os_page_count; int pages_per_buffer; int max_entries; - volatile uint64 touch pg_attribute_unused(); char *startptr, *endptr; @@ -365,7 +366,7 @@ pg_buffercache_numa_pages(PG_FUNCTION_ARGS) /* Used to determine the NUMA node for all OS pages at once */ os_page_ptrs = palloc0(sizeof(void *) * os_page_count); - os_page_status = palloc(sizeof(uint64) * os_page_count); + os_page_status = palloc(sizeof(int) * os_page_count); /* Fill pointers for all the memory pages. */ idx = 0; @@ -375,7 +376,7 @@ pg_buffercache_numa_pages(PG_FUNCTION_ARGS) /* Only need to touch memory once per backend process lifetime */ if (firstNumaTouch) - pg_numa_touch_mem_if_required(touch, ptr); + pg_numa_touch_mem_if_required(ptr); } Assert(idx == os_page_count); @@ -561,6 +562,8 @@ pg_buffercache_summary(PG_FUNCTION_ARGS) BufferDesc *bufHdr; uint32 buf_state; + CHECK_FOR_INTERRUPTS(); + /* * This function summarizes the state of all headers. 
Locking the * buffer headers wouldn't provide an improved result as the state of @@ -621,6 +624,8 @@ pg_buffercache_usage_counts(PG_FUNCTION_ARGS) uint32 buf_state = pg_atomic_read_u32(&bufHdr->state); int usage_count; + CHECK_FOR_INTERRUPTS(); + usage_count = BUF_STATE_GET_USAGECOUNT(buf_state); usage_counts[usage_count]++; diff --git a/contrib/pg_prewarm/pg_prewarm.c b/contrib/pg_prewarm/pg_prewarm.c index b968933ea8b6c..5b519a2c85422 100644 --- a/contrib/pg_prewarm/pg_prewarm.c +++ b/contrib/pg_prewarm/pg_prewarm.c @@ -16,9 +16,11 @@ #include #include "access/relation.h" +#include "catalog/index.h" #include "fmgr.h" #include "miscadmin.h" #include "storage/bufmgr.h" +#include "storage/lmgr.h" #include "storage/read_stream.h" #include "storage/smgr.h" #include "utils/acl.h" @@ -71,6 +73,8 @@ pg_prewarm(PG_FUNCTION_ARGS) char *ttype; PrewarmType ptype; AclResult aclresult; + char relkind; + Oid privOid; /* Basic sanity checking. */ if (PG_ARGISNULL(0)) @@ -106,9 +110,43 @@ pg_prewarm(PG_FUNCTION_ARGS) forkString = text_to_cstring(forkName); forkNumber = forkname_to_number(forkString); - /* Open relation and check privileges. */ + /* + * Open relation and check privileges. If the relation is an index, we + * must check the privileges on its parent table instead. + */ + relkind = get_rel_relkind(relOid); + if (relkind == RELKIND_INDEX || + relkind == RELKIND_PARTITIONED_INDEX) + { + privOid = IndexGetRelation(relOid, true); + + /* Lock table before index to avoid deadlock. */ + if (OidIsValid(privOid)) + LockRelationOid(privOid, AccessShareLock); + } + else + privOid = relOid; + rel = relation_open(relOid, AccessShareLock); - aclresult = pg_class_aclcheck(relOid, GetUserId(), ACL_SELECT); + + /* + * It's possible that the relation with OID "privOid" was dropped and the + * OID was reused before we locked it. If that happens, we could be left + * with the wrong parent table OID, in which case we must ERROR. It's + * possible that such a race would change the outcome of + * get_rel_relkind(), too, but the worst case scenario there is that we'll + * check privileges on the index instead of its parent table, which isn't + * too terrible. + */ + if (!OidIsValid(privOid) || + (privOid != relOid && + privOid != IndexGetRelation(relOid, true))) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("could not find parent table of index \"%s\"", + RelationGetRelationName(rel)))); + + aclresult = pg_class_aclcheck(privOid, GetUserId(), ACL_SELECT); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), get_rel_name(relOid)); @@ -233,8 +271,11 @@ pg_prewarm(PG_FUNCTION_ARGS) read_stream_end(stream); } - /* Close relation, release lock. */ + /* Close relation, release locks. */ relation_close(rel, AccessShareLock); + if (privOid != relOid) + UnlockRelationOid(privOid, AccessShareLock); + PG_RETURN_INT64(blocks_done); } diff --git a/contrib/pg_prewarm/t/001_basic.pl b/contrib/pg_prewarm/t/001_basic.pl index 0a8259d367854..ed70ceb4fca03 100644 --- a/contrib/pg_prewarm/t/001_basic.pl +++ b/contrib/pg_prewarm/t/001_basic.pl @@ -11,7 +11,7 @@ my $node = PostgreSQL::Test::Cluster->new('main'); -$node->init; +$node->init('auth_extra' => [ '--create-role', 'test_user' ]); $node->append_conf( 'postgresql.conf', qq{shared_preload_libraries = 'pg_prewarm' @@ -23,7 +23,9 @@ $node->safe_psql("postgres", "CREATE EXTENSION pg_prewarm;\n" . "CREATE TABLE test(c1 int);\n" - . "INSERT INTO test SELECT generate_series(1, 100);"); + . 
"INSERT INTO test SELECT generate_series(1, 100);\n" + . "CREATE INDEX test_idx ON test(c1);\n" + . "CREATE ROLE test_user LOGIN;"); # test read mode my $result = @@ -42,6 +44,31 @@ or $stderr =~ qr/prefetch is not supported by this build/), 'prefetch mode succeeded'); +# test_user should be unable to prewarm table/index without privileges +($cmdret, $stdout, $stderr) = + $node->psql( + "postgres", "SELECT pg_prewarm('test');", + extra_params => [ '--username' => 'test_user' ]); +ok($stderr =~ /permission denied for table test/, 'pg_prewarm failed as expected'); +($cmdret, $stdout, $stderr) = + $node->psql( + "postgres", "SELECT pg_prewarm('test_idx');", + extra_params => [ '--username' => 'test_user' ]); +ok($stderr =~ /permission denied for index test_idx/, 'pg_prewarm failed as expected'); + +# test_user should be able to prewarm table/index with privileges +$node->safe_psql("postgres", "GRANT SELECT ON test TO test_user;"); +$result = + $node->safe_psql( + "postgres", "SELECT pg_prewarm('test');", + extra_params => [ '--username' => 'test_user' ]); +like($result, qr/^[1-9][0-9]*$/, 'pg_prewarm succeeded as expected'); +$result = + $node->safe_psql( + "postgres", "SELECT pg_prewarm('test_idx');", + extra_params => [ '--username' => 'test_user' ]); +like($result, qr/^[1-9][0-9]*$/, 'pg_prewarm succeeded as expected'); + # test autoprewarm_dump_now() $result = $node->safe_psql("postgres", "SELECT autoprewarm_dump_now();"); like($result, qr/^[1-9][0-9]*$/, 'autoprewarm_dump_now succeeded'); diff --git a/contrib/pg_stat_statements/expected/squashing.out b/contrib/pg_stat_statements/expected/squashing.out index f952f47ef7be1..d5bb67c7222fa 100644 --- a/contrib/pg_stat_statements/expected/squashing.out +++ b/contrib/pg_stat_statements/expected/squashing.out @@ -809,6 +809,84 @@ SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; select where $1 IN ($2 /*, ... */) | 2 (2 rows) +-- composite function with row expansion +create table test_composite(x integer); +CREATE FUNCTION composite_f(a integer[], out x integer, out y integer) returns +record as $$ begin + x = a[1]; + y = a[2]; + end; +$$ language plpgsql; +SELECT pg_stat_statements_reset() IS NOT NULL AS t; + t +--- + t +(1 row) + +SELECT ((composite_f(array[1, 2]))).* FROM test_composite; + x | y +---+--- +(0 rows) + +SELECT ((composite_f(array[1, 2, 3]))).* FROM test_composite; + x | y +---+--- +(0 rows) + +SELECT ((composite_f(array[1, 2, 3]))).*, 1, 2, 3, ((composite_f(array[1, 2, 3]))).*, 1, 2 +FROM test_composite +WHERE x IN (1, 2, 3); + x | y | ?column? | ?column? | ?column? | x | y | ?column? | ?column? +---+---+----------+----------+----------+---+---+----------+---------- +(0 rows) + +SELECT ((composite_f(array[1, $1, 3]))).*, 1 FROM test_composite \bind 1 +; + x | y | ?column? +---+---+---------- +(0 rows) + +-- ROW() expression with row expansion +SELECT (ROW(ARRAY[1,2])).*; + f1 +------- + {1,2} +(1 row) + +SELECT (ROW(ARRAY[1, 2], ARRAY[1, 2, 3])).*; + f1 | f2 +-------+--------- + {1,2} | {1,2,3} +(1 row) + +SELECT 1, 2, (ROW(ARRAY[1, 2], ARRAY[1, 2, 3])).*, 3, 4; + ?column? | ?column? | f1 | f2 | ?column? | ?column? +----------+----------+-------+---------+----------+---------- + 1 | 2 | {1,2} | {1,2,3} | 3 | 4 +(1 row) + +SELECT (ROW(ARRAY[1, 2], ARRAY[1, $1, 3])).*, 1 \bind 1 +; + f1 | f2 | ?column? 
+-------+---------+---------- + {1,2} | {1,1,3} | 1 +(1 row) + +SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; + query | calls +-------------------------------------------------------------------------------------------------------------+------- + SELECT $1, $2, (ROW(ARRAY[$3 /*, ... */], ARRAY[$4 /*, ... */])).*, $5, $6 | 1 + SELECT ((composite_f(array[$1 /*, ... */]))).* FROM test_composite | 2 + SELECT ((composite_f(array[$1 /*, ... */]))).*, $2 FROM test_composite | 1 + SELECT ((composite_f(array[$1 /*, ... */]))).*, $2, $3, $4, ((composite_f(array[$5 /*, ... */]))).*, $6, $7+| 1 + FROM test_composite +| + WHERE x IN ($8 /*, ... */) | + SELECT (ROW(ARRAY[$1 /*, ... */])).* | 1 + SELECT (ROW(ARRAY[$1 /*, ... */], ARRAY[$2 /*, ... */])).* | 1 + SELECT (ROW(ARRAY[$1 /*, ... */], ARRAY[$2 /*, ... */])).*, $3 | 1 + SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1 +(8 rows) + -- -- cleanup -- @@ -818,3 +896,5 @@ DROP TABLE test_squash_numeric; DROP TABLE test_squash_bigint; DROP TABLE test_squash_cast CASCADE; DROP TABLE test_squash_jsonb; +DROP TABLE test_composite; +DROP FUNCTION composite_f; diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index e7857f81ec057..9b3a33118f468 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -2915,9 +2915,8 @@ generate_normalized_query(JumbleState *jstate, const char *query, * have originated from within the authoritative parser, this should not be * a problem. * - * Duplicate constant pointers are possible, and will have their lengths - * marked as '-1', so that they are later ignored. (Actually, we assume the - * lengths were initialized as -1 to start with, and don't change them here.) + * Multiple constants can have the same location. We reset lengths of those + * past the first to -1 so that they can later be ignored. * * If query_loc > 0, then "query" has been advanced by that much compared to * the original string start, so we need to translate the provided locations @@ -2937,8 +2936,6 @@ fill_in_constant_lengths(JumbleState *jstate, const char *query, core_yy_extra_type yyextra; core_YYSTYPE yylval; YYLTYPE yylloc; - int last_loc = -1; - int i; /* * Sort the records by location so that we can process them in order while @@ -2959,23 +2956,29 @@ fill_in_constant_lengths(JumbleState *jstate, const char *query, yyextra.escape_string_warning = false; /* Search for each constant, in sequence */ - for (i = 0; i < jstate->clocations_count; i++) + for (int i = 0; i < jstate->clocations_count; i++) { - int loc = locs[i].location; + int loc; int tok; - /* Adjust recorded location if we're dealing with partial string */ - loc -= query_loc; - - Assert(loc >= 0); + /* Ignore constants after the first one in the same location */ + if (i > 0 && locs[i].location == locs[i - 1].location) + { + locs[i].length = -1; + continue; + } if (locs[i].squashed) continue; /* squashable list, ignore */ - if (loc <= last_loc) - continue; /* Duplicate constant, ignore */ + /* Adjust recorded location if we're dealing with partial string */ + loc = locs[i].location - query_loc; + Assert(loc >= 0); - /* Lex tokens until we find the desired constant */ + /* + * We have a valid location for a constant that's not a dupe. Lex + * tokens until we find the desired constant. 
+ */ for (;;) { tok = core_yylex(&yylval, &yylloc, yyscanner); @@ -3021,8 +3024,6 @@ fill_in_constant_lengths(JumbleState *jstate, const char *query, /* If we hit end-of-string, give up, leaving remaining lengths -1 */ if (tok == 0) break; - - last_loc = loc; } scanner_finish(yyscanner); diff --git a/contrib/pg_stat_statements/sql/squashing.sql b/contrib/pg_stat_statements/sql/squashing.sql index 53138d125a92c..03b0515f87285 100644 --- a/contrib/pg_stat_statements/sql/squashing.sql +++ b/contrib/pg_stat_statements/sql/squashing.sql @@ -291,6 +291,30 @@ select where '1' IN ('1'::int::text, '2'::int::text); select where '1' = ANY (array['1'::int::text, '2'::int::text]); SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; +-- composite function with row expansion +create table test_composite(x integer); +CREATE FUNCTION composite_f(a integer[], out x integer, out y integer) returns +record as $$ begin + x = a[1]; + y = a[2]; + end; +$$ language plpgsql; +SELECT pg_stat_statements_reset() IS NOT NULL AS t; +SELECT ((composite_f(array[1, 2]))).* FROM test_composite; +SELECT ((composite_f(array[1, 2, 3]))).* FROM test_composite; +SELECT ((composite_f(array[1, 2, 3]))).*, 1, 2, 3, ((composite_f(array[1, 2, 3]))).*, 1, 2 +FROM test_composite +WHERE x IN (1, 2, 3); +SELECT ((composite_f(array[1, $1, 3]))).*, 1 FROM test_composite \bind 1 +; +-- ROW() expression with row expansion +SELECT (ROW(ARRAY[1,2])).*; +SELECT (ROW(ARRAY[1, 2], ARRAY[1, 2, 3])).*; +SELECT 1, 2, (ROW(ARRAY[1, 2], ARRAY[1, 2, 3])).*, 3, 4; +SELECT (ROW(ARRAY[1, 2], ARRAY[1, $1, 3])).*, 1 \bind 1 +; +SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; + -- -- cleanup -- @@ -300,3 +324,5 @@ DROP TABLE test_squash_numeric; DROP TABLE test_squash_bigint; DROP TABLE test_squash_cast CASCADE; DROP TABLE test_squash_jsonb; +DROP TABLE test_composite; +DROP FUNCTION composite_f; diff --git a/contrib/pg_trgm/expected/pg_trgm.out b/contrib/pg_trgm/expected/pg_trgm.out index 0b70d9de25624..04da98170ab15 100644 --- a/contrib/pg_trgm/expected/pg_trgm.out +++ b/contrib/pg_trgm/expected/pg_trgm.out @@ -4693,6 +4693,23 @@ select count(*) from test_trgm where t like '%99%' and t like '%qw%'; 19 (1 row) +explain (costs off) +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on test_trgm + Recheck Cond: ((t %> ''::text) AND (t %> '%qwerty%'::text)) + -> Bitmap Index Scan on trgm_idx + Index Cond: ((t %> ''::text) AND (t %> '%qwerty%'::text)) +(5 rows) + +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; + count +------- + 0 +(1 row) + -- ensure that pending-list items are handled correctly, too create temp table t_test_trgm(t text COLLATE "C"); create index t_trgm_idx on t_test_trgm using gin (t gin_trgm_ops); @@ -4731,6 +4748,23 @@ select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; 1 (1 row) +explain (costs off) +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on t_test_trgm + Recheck Cond: ((t %> ''::text) AND (t %> '%qwerty%'::text)) + -> Bitmap Index Scan on t_trgm_idx + Index Cond: ((t %> ''::text) AND (t %> '%qwerty%'::text)) +(5 rows) + +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; + count +------- + 0 +(1 row) + -- run the same queries with sequential scan to check 
the results set enable_bitmapscan=off; set enable_seqscan=on; @@ -4746,6 +4780,12 @@ select count(*) from test_trgm where t like '%99%' and t like '%qw%'; 19 (1 row) +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; + count +------- + 0 +(1 row) + select count(*) from t_test_trgm where t like '%99%' and t like '%qwerty%'; count ------- @@ -4758,6 +4798,12 @@ select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; 1 (1 row) +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; + count +------- + 0 +(1 row) + reset enable_bitmapscan; create table test2(t text COLLATE "C"); insert into test2 values ('abcdef'); diff --git a/contrib/pg_trgm/sql/pg_trgm.sql b/contrib/pg_trgm/sql/pg_trgm.sql index 340c9891899f0..44debced6d581 100644 --- a/contrib/pg_trgm/sql/pg_trgm.sql +++ b/contrib/pg_trgm/sql/pg_trgm.sql @@ -80,6 +80,9 @@ select count(*) from test_trgm where t like '%99%' and t like '%qwerty%'; explain (costs off) select count(*) from test_trgm where t like '%99%' and t like '%qw%'; select count(*) from test_trgm where t like '%99%' and t like '%qw%'; +explain (costs off) +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; -- ensure that pending-list items are handled correctly, too create temp table t_test_trgm(t text COLLATE "C"); create index t_trgm_idx on t_test_trgm using gin (t gin_trgm_ops); @@ -90,14 +93,19 @@ select count(*) from t_test_trgm where t like '%99%' and t like '%qwerty%'; explain (costs off) select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; +explain (costs off) +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; -- run the same queries with sequential scan to check the results set enable_bitmapscan=off; set enable_seqscan=on; select count(*) from test_trgm where t like '%99%' and t like '%qwerty%'; select count(*) from test_trgm where t like '%99%' and t like '%qw%'; +select count(*) from test_trgm where t %> '' and t %> '%qwerty%'; select count(*) from t_test_trgm where t like '%99%' and t like '%qwerty%'; select count(*) from t_test_trgm where t like '%99%' and t like '%qw%'; +select count(*) from t_test_trgm where t %> '' and t %> '%qwerty%'; reset enable_bitmapscan; create table test2(t text COLLATE "C"); diff --git a/contrib/pgcrypto/expected/hmac-md5_2.out b/contrib/pgcrypto/expected/hmac-md5_2.out new file mode 100644 index 0000000000000..08cdf95d53245 --- /dev/null +++ b/contrib/pgcrypto/expected/hmac-md5_2.out @@ -0,0 +1,44 @@ +-- +-- HMAC-MD5 +-- +SELECT hmac( +'Hi There', +'\x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b'::bytea, +'md5'); +ERROR: Cannot use "md5": No such hash algorithm +-- 2 +SELECT hmac( +'Jefe', +'what do ya want for nothing?', +'md5'); +ERROR: Cannot use "md5": No such hash algorithm +-- 3 +SELECT hmac( +'\xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd'::bytea, +'\xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'::bytea, +'md5'); +ERROR: Cannot use "md5": No such hash algorithm +-- 4 +SELECT hmac( +'\xcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd'::bytea, +'\x0102030405060708090a0b0c0d0e0f10111213141516171819'::bytea, +'md5'); +ERROR: Cannot use "md5": No such hash algorithm +-- 5 +SELECT hmac( +'Test With Truncation', 
+'\x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c'::bytea, +'md5'); +ERROR: Cannot use "md5": No such hash algorithm +-- 6 +SELECT hmac( +'Test Using Larger Than Block-Size Key - Hash Key First', +'\xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'::bytea, +'md5'); +ERROR: Cannot use "md5": No such hash algorithm +-- 7 +SELECT hmac( +'Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data', +'\xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'::bytea, +'md5'); +ERROR: Cannot use "md5": No such hash algorithm diff --git a/contrib/pgcrypto/expected/md5_2.out b/contrib/pgcrypto/expected/md5_2.out new file mode 100644 index 0000000000000..51bdaa86f32b4 --- /dev/null +++ b/contrib/pgcrypto/expected/md5_2.out @@ -0,0 +1,17 @@ +-- +-- MD5 message digest +-- +SELECT digest('', 'md5'); +ERROR: Cannot use "md5": No such hash algorithm +SELECT digest('a', 'md5'); +ERROR: Cannot use "md5": No such hash algorithm +SELECT digest('abc', 'md5'); +ERROR: Cannot use "md5": No such hash algorithm +SELECT digest('message digest', 'md5'); +ERROR: Cannot use "md5": No such hash algorithm +SELECT digest('abcdefghijklmnopqrstuvwxyz', 'md5'); +ERROR: Cannot use "md5": No such hash algorithm +SELECT digest('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', 'md5'); +ERROR: Cannot use "md5": No such hash algorithm +SELECT digest('12345678901234567890123456789012345678901234567890123456789012345678901234567890', 'md5'); +ERROR: Cannot use "md5": No such hash algorithm diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index 0d9c2b0b65369..dbe7a190aaf6d 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -424,7 +424,7 @@ pgstat_btree_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno, /* fully empty page */ stat->free_space += BLCKSZ; } - else + else if (PageGetSpecialSize(page) == MAXALIGN(sizeof(BTPageOpaqueData))) { BTPageOpaque opaque; @@ -458,10 +458,16 @@ pgstat_hash_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno, Buffer buf; Page page; - buf = _hash_getbuf_with_strategy(rel, blkno, HASH_READ, 0, bstrategy); + buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy); + LockBuffer(buf, HASH_READ); page = BufferGetPage(buf); - if (PageGetSpecialSize(page) == MAXALIGN(sizeof(HashPageOpaqueData))) + if (PageIsNew(page)) + { + /* fully empty page */ + stat->free_space += BLCKSZ; + } + else if (PageGetSpecialSize(page) == MAXALIGN(sizeof(HashPageOpaqueData))) { HashPageOpaque opaque; @@ -502,17 +508,23 @@ pgstat_gist_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno, buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy); LockBuffer(buf, GIST_SHARE); - gistcheckpage(rel, buf); page = BufferGetPage(buf); - - if (GistPageIsLeaf(page)) + if (PageIsNew(page)) { - pgstat_index_page(stat, page, FirstOffsetNumber, - PageGetMaxOffsetNumber(page)); + /* fully empty page */ + stat->free_space += BLCKSZ; } - else + else if (PageGetSpecialSize(page) == MAXALIGN(sizeof(GISTPageOpaqueData))) { - /* root or node */ + if (GistPageIsLeaf(page)) + { + pgstat_index_page(stat, page, FirstOffsetNumber, + PageGetMaxOffsetNumber(page)); + } + else + { + /* root or node */ + } } UnlockReleaseBuffer(buf); diff --git a/contrib/postgres_fdw/.gitignore 
b/contrib/postgres_fdw/.gitignore index 5dcb3ff972350..b4903eba657fa 100644 --- a/contrib/postgres_fdw/.gitignore +++ b/contrib/postgres_fdw/.gitignore @@ -1,4 +1,6 @@ # Generated subdirectories /log/ /results/ +/output_iso/ /tmp_check/ +/tmp_check_iso/ diff --git a/contrib/postgres_fdw/Makefile b/contrib/postgres_fdw/Makefile index adfbd2ef758e0..8eaf4d263b688 100644 --- a/contrib/postgres_fdw/Makefile +++ b/contrib/postgres_fdw/Makefile @@ -17,6 +17,8 @@ EXTENSION = postgres_fdw DATA = postgres_fdw--1.0.sql postgres_fdw--1.0--1.1.sql postgres_fdw--1.1--1.2.sql REGRESS = postgres_fdw query_cancel +ISOLATION = eval_plan_qual +ISOLATION_OPTS = --load-extension=postgres_fdw TAP_TESTS = 1 ifdef USE_PGXS diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index 304f3c20f8356..eb255e74d155d 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -462,7 +462,7 @@ pgfdw_security_check(const char **keywords, const char **values, UserMapping *us * assume that UseScramPassthrough is also true since SCRAM options are * only set when UseScramPassthrough is enabled. */ - if (MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values)) return; ereport(ERROR, @@ -568,7 +568,7 @@ connect_pg_server(ForeignServer *server, UserMapping *user) n++; /* Add required SCRAM pass-through connection options if it's enabled. */ - if (MyProcPort->has_scram_keys && UseScramPassthrough(server, user)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && UseScramPassthrough(server, user)) { int len; int encoded_len; @@ -743,7 +743,7 @@ check_conn_params(const char **keywords, const char **values, UserMapping *user) * assume that UseScramPassthrough is also true since SCRAM options are * only set when UseScramPassthrough is enabled. */ - if (MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values)) + if (MyProcPort != NULL && MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values)) return; ereport(ERROR, @@ -2557,7 +2557,7 @@ pgfdw_has_required_scram_options(const char **keywords, const char **values) } } - has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort->has_scram_keys; + has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort != NULL && MyProcPort->has_scram_keys; return (has_scram_keys && has_require_auth); } diff --git a/contrib/postgres_fdw/expected/eval_plan_qual.out b/contrib/postgres_fdw/expected/eval_plan_qual.out new file mode 100644 index 0000000000000..cee4c6ad1a8ad --- /dev/null +++ b/contrib/postgres_fdw/expected/eval_plan_qual.out @@ -0,0 +1,131 @@ +Parsed test spec with 2 sessions + +starting permutation: s0_update_l s1_tuplock_l_0 s0_commit s1_commit +step s0_update_l: UPDATE l SET i = i + 1; +step s1_tuplock_l_0: + EXPLAIN (VERBOSE, COSTS OFF) + SELECT l.* FROM l, ft WHERE l.i = ft.i AND l.i = 123 FOR UPDATE OF l; + SELECT l.* FROM l, ft WHERE l.i = ft.i AND l.i = 123 FOR UPDATE OF l; + +step s0_commit: COMMIT; +step s1_tuplock_l_0: <... 
completed> +QUERY PLAN +--------------------------------------------------------------------- +LockRows + Output: l.i, l.v, l.ctid, ft.* + -> Nested Loop + Output: l.i, l.v, l.ctid, ft.* + -> Seq Scan on public.l + Output: l.i, l.v, l.ctid + Filter: (l.i = 123) + -> Foreign Scan on public.ft + Output: ft.*, ft.i + Remote SQL: SELECT i, v FROM public.t WHERE ((i = 123)) +(10 rows) + +i|v +-+- +(0 rows) + +step s1_commit: COMMIT; + +starting permutation: s0_update_l s1_tuplock_l_1 s0_commit s1_commit +step s0_update_l: UPDATE l SET i = i + 1; +step s1_tuplock_l_1: + EXPLAIN (VERBOSE, COSTS OFF) + SELECT l.* FROM l, ft WHERE l.i = ft.i AND l.v = 'foo' FOR UPDATE OF l; + SELECT l.* FROM l, ft WHERE l.i = ft.i AND l.v = 'foo' FOR UPDATE OF l; + +step s0_commit: COMMIT; +step s1_tuplock_l_1: <... completed> +QUERY PLAN +----------------------------------------------------------------------------- +LockRows + Output: l.i, l.v, l.ctid, ft.* + -> Nested Loop + Output: l.i, l.v, l.ctid, ft.* + -> Seq Scan on public.l + Output: l.i, l.v, l.ctid + Filter: (l.v = 'foo'::text) + -> Foreign Scan on public.ft + Output: ft.*, ft.i + Remote SQL: SELECT i, v FROM public.t WHERE ((i = $1::integer)) +(10 rows) + +i|v +-+- +(0 rows) + +step s1_commit: COMMIT; + +starting permutation: s0_update_a s1_tuplock_a_0 s0_commit s1_commit +step s0_update_a: UPDATE a SET i = i + 1; +step s1_tuplock_a_0: + EXPLAIN (VERBOSE, COSTS OFF) + SELECT a.i FROM a, fb, fc WHERE a.i = fb.i AND fb.i = fc.i FOR UPDATE OF a; + SELECT a.i FROM a, fb, fc WHERE a.i = fb.i AND fb.i = fc.i FOR UPDATE OF a; + +step s0_commit: COMMIT; +step s1_tuplock_a_0: <... completed> +QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +LockRows + Output: a.i, a.ctid, fb.*, fc.* + -> Nested Loop + Output: a.i, a.ctid, fb.*, fc.* + Join Filter: (fb.i = a.i) + -> Foreign Scan + Output: fb.*, fb.i, fc.*, fc.i + Relations: (public.fb) INNER JOIN (public.fc) + Remote SQL: SELECT CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2.i) END, r2.i, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.i) END, r3.i FROM (public.b r2 INNER JOIN public.c r3 ON (((r2.i = r3.i)))) + -> Nested Loop + Output: fb.*, fb.i, fc.*, fc.i + Join Filter: (fb.i = fc.i) + -> Foreign Scan on public.fb + Output: fb.*, fb.i + Remote SQL: SELECT i FROM public.b ORDER BY i ASC NULLS LAST + -> Foreign Scan on public.fc + Output: fc.*, fc.i + Remote SQL: SELECT i FROM public.c + -> Seq Scan on public.a + Output: a.i, a.ctid +(20 rows) + +i +- +(0 rows) + +step s1_commit: COMMIT; + +starting permutation: s0_update_a s1_tuplock_a_1 s0_commit s1_commit +step s0_update_a: UPDATE a SET i = i + 1; +step s1_tuplock_a_1: + EXPLAIN (VERBOSE, COSTS OFF) + SELECT a.i, + (SELECT 1 FROM fb, fc WHERE a.i = fb.i AND fb.i = fc.i) + FROM a FOR UPDATE; + SELECT a.i, + (SELECT 1 FROM fb, fc WHERE a.i = fb.i AND fb.i = fc.i) + FROM a FOR UPDATE; + +step s0_commit: COMMIT; +step s1_tuplock_a_1: <... 
completed> +QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- +LockRows + Output: a.i, ((SubPlan 1)), a.ctid + -> Seq Scan on public.a + Output: a.i, (SubPlan 1), a.ctid + SubPlan 1 + -> Foreign Scan + Output: 1 + Relations: (public.fb) INNER JOIN (public.fc) + Remote SQL: SELECT NULL FROM (public.b r1 INNER JOIN public.c r2 ON (((r2.i = $1::integer)) AND ((r1.i = $1::integer)))) +(9 rows) + +i|?column? +-+-------- +2| +(1 row) + +step s1_commit: COMMIT; diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out index 2185b42bb4f79..5fc2a5a9b07d6 100644 --- a/contrib/postgres_fdw/expected/postgres_fdw.out +++ b/contrib/postgres_fdw/expected/postgres_fdw.out @@ -8212,6 +8212,119 @@ DELETE FROM rem1; -- can't be pushed down (5 rows) DROP TRIGGER trig_row_after_delete ON rem1; +-- We are allowed to create transition-table triggers on both kinds of +-- inheritance even if they contain foreign tables as children, but currently +-- collecting transition tuples from such foreign tables is not supported. +CREATE TABLE local_tbl (a text, b int); +CREATE FOREIGN TABLE foreign_tbl (a text, b int) + SERVER loopback OPTIONS (table_name 'local_tbl'); +INSERT INTO foreign_tbl VALUES ('AAA', 42); +-- Test case for partition hierarchy +CREATE TABLE parent_tbl (a text, b int) PARTITION BY LIST (a); +ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES IN ('AAA'); +CREATE TRIGGER parent_tbl_insert_trig + AFTER INSERT ON parent_tbl REFERENCING NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_update_trig + AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_delete_trig + AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +INSERT INTO parent_tbl VALUES ('AAA', 42); +ERROR: cannot collect transition tuples from child foreign tables +COPY parent_tbl (a, b) FROM stdin; +ERROR: cannot collect transition tuples from child foreign tables +CONTEXT: COPY parent_tbl, line 1: "AAA 42" +ALTER SERVER loopback OPTIONS (ADD batch_size '10'); +INSERT INTO parent_tbl VALUES ('AAA', 42); +ERROR: cannot collect transition tuples from child foreign tables +COPY parent_tbl (a, b) FROM stdin; +ERROR: cannot collect transition tuples from child foreign tables +CONTEXT: COPY parent_tbl, line 1: "AAA 42" +ALTER SERVER loopback OPTIONS (DROP batch_size); +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE parent_tbl SET b = b + 1; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Update on public.parent_tbl + Foreign Update on public.foreign_tbl parent_tbl_1 + Remote SQL: UPDATE public.local_tbl SET b = $2 WHERE ctid = $1 + -> Foreign Scan on public.foreign_tbl parent_tbl_1 + Output: (parent_tbl_1.b + 1), parent_tbl_1.tableoid, parent_tbl_1.ctid, parent_tbl_1.* + Remote SQL: SELECT a, b, ctid FROM public.local_tbl FOR UPDATE +(6 rows) + +UPDATE parent_tbl SET b = b + 1; +ERROR: cannot collect transition tuples from child foreign tables +EXPLAIN (VERBOSE, COSTS OFF) +DELETE FROM parent_tbl; + QUERY PLAN +------------------------------------------------------------------ + Delete on public.parent_tbl + Foreign Delete on public.foreign_tbl parent_tbl_1 + Remote SQL: DELETE FROM public.local_tbl WHERE 
ctid = $1 + -> Foreign Scan on public.foreign_tbl parent_tbl_1 + Output: parent_tbl_1.tableoid, parent_tbl_1.ctid + Remote SQL: SELECT ctid FROM public.local_tbl FOR UPDATE +(6 rows) + +DELETE FROM parent_tbl; +ERROR: cannot collect transition tuples from child foreign tables +ALTER TABLE parent_tbl DETACH PARTITION foreign_tbl; +DROP TABLE parent_tbl; +-- Test case for non-partition hierarchy +CREATE TABLE parent_tbl (a text, b int); +ALTER FOREIGN TABLE foreign_tbl INHERIT parent_tbl; +CREATE TRIGGER parent_tbl_update_trig + AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_delete_trig + AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE parent_tbl SET b = b + 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Update on public.parent_tbl + Update on public.parent_tbl parent_tbl_1 + Foreign Update on public.foreign_tbl parent_tbl_2 + Remote SQL: UPDATE public.local_tbl SET b = $2 WHERE ctid = $1 + -> Result + Output: (parent_tbl.b + 1), parent_tbl.tableoid, parent_tbl.ctid, (NULL::record) + -> Append + -> Seq Scan on public.parent_tbl parent_tbl_1 + Output: parent_tbl_1.b, parent_tbl_1.tableoid, parent_tbl_1.ctid, NULL::record + -> Foreign Scan on public.foreign_tbl parent_tbl_2 + Output: parent_tbl_2.b, parent_tbl_2.tableoid, parent_tbl_2.ctid, parent_tbl_2.* + Remote SQL: SELECT a, b, ctid FROM public.local_tbl FOR UPDATE +(12 rows) + +UPDATE parent_tbl SET b = b + 1; +ERROR: cannot collect transition tuples from child foreign tables +EXPLAIN (VERBOSE, COSTS OFF) +DELETE FROM parent_tbl; + QUERY PLAN +------------------------------------------------------------------------ + Delete on public.parent_tbl + Delete on public.parent_tbl parent_tbl_1 + Foreign Delete on public.foreign_tbl parent_tbl_2 + Remote SQL: DELETE FROM public.local_tbl WHERE ctid = $1 + -> Append + -> Seq Scan on public.parent_tbl parent_tbl_1 + Output: parent_tbl_1.tableoid, parent_tbl_1.ctid + -> Foreign Scan on public.foreign_tbl parent_tbl_2 + Output: parent_tbl_2.tableoid, parent_tbl_2.ctid + Remote SQL: SELECT ctid FROM public.local_tbl FOR UPDATE +(10 rows) + +DELETE FROM parent_tbl; +ERROR: cannot collect transition tuples from child foreign tables +ALTER FOREIGN TABLE foreign_tbl NO INHERIT parent_tbl; +DROP TABLE parent_tbl; +-- Cleanup +DROP FOREIGN TABLE foreign_tbl; +DROP TABLE local_tbl; -- =================================================================== -- test inheritance features -- =================================================================== @@ -12515,7 +12628,7 @@ ALTER SERVER loopback2 OPTIONS (DROP parallel_abort); -- =================================================================== CREATE TABLE analyze_table (id int, a text, b bigint); CREATE FOREIGN TABLE analyze_ftable (id int, a text, b bigint) - SERVER loopback OPTIONS (table_name 'analyze_rtable1'); + SERVER loopback OPTIONS (table_name 'analyze_table'); INSERT INTO analyze_table (SELECT x FROM generate_series(1,1000) x); ANALYZE analyze_table; SET default_statistics_target = 10; @@ -12523,15 +12636,15 @@ ANALYZE analyze_table; ALTER SERVER loopback OPTIONS (analyze_sampling 'invalid'); ERROR: invalid value for string option "analyze_sampling": invalid ALTER SERVER loopback OPTIONS (analyze_sampling 'auto'); -ANALYZE analyze_table; +ANALYZE 
analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'system'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'bernoulli'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'random'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'off'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; -- cleanup DROP FOREIGN TABLE analyze_ftable; DROP TABLE analyze_table; diff --git a/contrib/postgres_fdw/meson.build b/contrib/postgres_fdw/meson.build index 8b29be24deeb7..c96030ca08984 100644 --- a/contrib/postgres_fdw/meson.build +++ b/contrib/postgres_fdw/meson.build @@ -41,6 +41,12 @@ tests += { ], 'regress_args': ['--dlpath', meson.build_root() / 'src/test/regress'], }, + 'isolation': { + 'specs': [ + 'eval_plan_qual', + ], + 'regress_args': ['--load-extension=postgres_fdw'], + }, 'tap': { 'tests': [ 't/001_auth_scram.pl', diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c index c2f936640bca8..6d5795b9fa9ea 100644 --- a/contrib/postgres_fdw/option.c +++ b/contrib/postgres_fdw/option.c @@ -531,7 +531,7 @@ process_pgfdw_appname(const char *appname) appendStringInfoString(&buf, application_name); break; case 'c': - appendStringInfo(&buf, INT64_HEX_FORMAT ".%x", MyStartTime, MyProcPid); + appendStringInfo(&buf, "%" PRIx64 ".%x", MyStartTime, MyProcPid); break; case 'C': appendStringInfoString(&buf, cluster_name); diff --git a/contrib/postgres_fdw/specs/eval_plan_qual.spec b/contrib/postgres_fdw/specs/eval_plan_qual.spec new file mode 100644 index 0000000000000..9f52270db6984 --- /dev/null +++ b/contrib/postgres_fdw/specs/eval_plan_qual.spec @@ -0,0 +1,102 @@ +# Tests for the EvalPlanQual mechanism involving foreign tables + +setup +{ + DO $d$ + BEGIN + EXECUTE $$CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname '$$||current_database()||$$', + port '$$||current_setting('port')||$$', + use_remote_estimate 'true' + )$$; + END; + $d$; + CREATE USER MAPPING FOR PUBLIC SERVER loopback; + + CREATE TABLE l (i int, v text); + CREATE TABLE t (i int, v text); + CREATE FOREIGN TABLE ft (i int, v text) SERVER loopback OPTIONS (table_name 't'); + + INSERT INTO l VALUES (123, 'foo'), (456, 'bar'), (789, 'baz'); + INSERT INTO t SELECT i, to_char(i, 'FM0000') FROM generate_series(1, 1000) i; + CREATE INDEX t_idx ON t (i); + ANALYZE l, t; + + CREATE TABLE a (i int); + CREATE TABLE b (i int); + CREATE TABLE c (i int); + CREATE FOREIGN TABLE fb (i int) SERVER loopback OPTIONS (table_name 'b'); + CREATE FOREIGN TABLE fc (i int) SERVER loopback OPTIONS (table_name 'c'); + + INSERT INTO a VALUES (1); + INSERT INTO b VALUES (1); + INSERT INTO c VALUES (1); + ANALYZE a, b, c; +} + +teardown +{ + DROP TABLE l; + DROP TABLE t; + DROP TABLE a; + DROP TABLE b; + DROP TABLE c; + DROP SERVER loopback CASCADE; +} + +session s0 +setup { BEGIN ISOLATION LEVEL READ COMMITTED; } +step s0_update_l { UPDATE l SET i = i + 1; } +step s0_update_a { UPDATE a SET i = i + 1; } +step s0_commit { COMMIT; } + +session s1 +setup { BEGIN ISOLATION LEVEL READ COMMITTED; } + +# Test for EPQ with a foreign scan pushing down a qual +step s1_tuplock_l_0 { + EXPLAIN (VERBOSE, COSTS OFF) + SELECT l.* FROM l, ft WHERE l.i = ft.i AND l.i = 123 FOR UPDATE OF l; + SELECT l.* FROM l, ft WHERE l.i = ft.i AND l.i = 123 FOR UPDATE OF l; +} + +# Same test, except that the qual is parameterized +step s1_tuplock_l_1 { + EXPLAIN (VERBOSE, COSTS OFF) 
+ SELECT l.* FROM l, ft WHERE l.i = ft.i AND l.v = 'foo' FOR UPDATE OF l; + SELECT l.* FROM l, ft WHERE l.i = ft.i AND l.v = 'foo' FOR UPDATE OF l; +} + +# Test for EPQ with a foreign scan pushing down a join +step s1_tuplock_a_0 { + EXPLAIN (VERBOSE, COSTS OFF) + SELECT a.i FROM a, fb, fc WHERE a.i = fb.i AND fb.i = fc.i FOR UPDATE OF a; + SELECT a.i FROM a, fb, fc WHERE a.i = fb.i AND fb.i = fc.i FOR UPDATE OF a; +} + +# Same test, except that the join is contained in a SubLink sub-select, not +# in the main query +step s1_tuplock_a_1 { + EXPLAIN (VERBOSE, COSTS OFF) + SELECT a.i, + (SELECT 1 FROM fb, fc WHERE a.i = fb.i AND fb.i = fc.i) + FROM a FOR UPDATE; + SELECT a.i, + (SELECT 1 FROM fb, fc WHERE a.i = fb.i AND fb.i = fc.i) + FROM a FOR UPDATE; +} + +step s1_commit { COMMIT; } + +# This test checks the case of rechecking a pushed-down qual. +permutation s0_update_l s1_tuplock_l_0 s0_commit s1_commit + +# This test checks the same case, except that the qual is parameterized. +permutation s0_update_l s1_tuplock_l_1 s0_commit s1_commit + +# This test checks the case of rechecking a pushed-down join. +permutation s0_update_a s1_tuplock_a_0 s0_commit s1_commit + +# This test exercises EvalPlanQual with a SubLink sub-select (which should +# be unaffected by any EPQ recheck behavior in the outer query). +permutation s0_update_a s1_tuplock_a_1 s0_commit s1_commit diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index e534b40de3c76..19bfc7b991067 100644 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -2272,6 +2272,84 @@ EXPLAIN (verbose, costs off) DELETE FROM rem1; -- can't be pushed down DROP TRIGGER trig_row_after_delete ON rem1; + +-- We are allowed to create transition-table triggers on both kinds of +-- inheritance even if they contain foreign tables as children, but currently +-- collecting transition tuples from such foreign tables is not supported. + +CREATE TABLE local_tbl (a text, b int); +CREATE FOREIGN TABLE foreign_tbl (a text, b int) + SERVER loopback OPTIONS (table_name 'local_tbl'); + +INSERT INTO foreign_tbl VALUES ('AAA', 42); + +-- Test case for partition hierarchy +CREATE TABLE parent_tbl (a text, b int) PARTITION BY LIST (a); +ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES IN ('AAA'); + +CREATE TRIGGER parent_tbl_insert_trig + AFTER INSERT ON parent_tbl REFERENCING NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_update_trig + AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_delete_trig + AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); + +INSERT INTO parent_tbl VALUES ('AAA', 42); + +COPY parent_tbl (a, b) FROM stdin; +AAA 42 +\. + +ALTER SERVER loopback OPTIONS (ADD batch_size '10'); + +INSERT INTO parent_tbl VALUES ('AAA', 42); + +COPY parent_tbl (a, b) FROM stdin; +AAA 42 +\. 
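-- Editor's aside (not part of the patch): a minimal, self-contained sketch of
-- the limitation this test exercises — any statement that must collect
-- transition tuples from a foreign-table child fails, per the error text the
-- expected output above records. All object names here (demo_parent,
-- demo_child, demo_remote, demo_trig) are hypothetical; the sketch assumes a
-- working postgres_fdw server named "loopback" and the test's trigger_func().
-- It is left fully commented out so the test file's behavior is unchanged.
--
-- CREATE TABLE demo_remote (a text, b int);
-- CREATE TABLE demo_parent (a text, b int) PARTITION BY LIST (a);
-- CREATE FOREIGN TABLE demo_child (a text, b int)
--     SERVER loopback OPTIONS (table_name 'demo_remote');
-- ALTER TABLE demo_parent ATTACH PARTITION demo_child FOR VALUES IN ('AAA');
-- CREATE TRIGGER demo_trig
--     AFTER INSERT ON demo_parent REFERENCING NEW TABLE AS new_table
--     FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
-- INSERT INTO demo_parent VALUES ('AAA', 1);
-- ERROR:  cannot collect transition tuples from child foreign tables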
+ +ALTER SERVER loopback OPTIONS (DROP batch_size); + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE parent_tbl SET b = b + 1; +UPDATE parent_tbl SET b = b + 1; + +EXPLAIN (VERBOSE, COSTS OFF) +DELETE FROM parent_tbl; +DELETE FROM parent_tbl; + +ALTER TABLE parent_tbl DETACH PARTITION foreign_tbl; +DROP TABLE parent_tbl; + +-- Test case for non-partition hierarchy +CREATE TABLE parent_tbl (a text, b int); +ALTER FOREIGN TABLE foreign_tbl INHERIT parent_tbl; + +CREATE TRIGGER parent_tbl_update_trig + AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); +CREATE TRIGGER parent_tbl_delete_trig + AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func(); + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE parent_tbl SET b = b + 1; +UPDATE parent_tbl SET b = b + 1; + +EXPLAIN (VERBOSE, COSTS OFF) +DELETE FROM parent_tbl; +DELETE FROM parent_tbl; + +ALTER FOREIGN TABLE foreign_tbl NO INHERIT parent_tbl; +DROP TABLE parent_tbl; + +-- Cleanup +DROP FOREIGN TABLE foreign_tbl; +DROP TABLE local_tbl; + -- =================================================================== -- test inheritance features -- =================================================================== @@ -4278,7 +4356,7 @@ ALTER SERVER loopback2 OPTIONS (DROP parallel_abort); CREATE TABLE analyze_table (id int, a text, b bigint); CREATE FOREIGN TABLE analyze_ftable (id int, a text, b bigint) - SERVER loopback OPTIONS (table_name 'analyze_rtable1'); + SERVER loopback OPTIONS (table_name 'analyze_table'); INSERT INTO analyze_table (SELECT x FROM generate_series(1,1000) x); ANALYZE analyze_table; @@ -4289,19 +4367,19 @@ ANALYZE analyze_table; ALTER SERVER loopback OPTIONS (analyze_sampling 'invalid'); ALTER SERVER loopback OPTIONS (analyze_sampling 'auto'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'system'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'bernoulli'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'random'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; ALTER SERVER loopback OPTIONS (SET analyze_sampling 'off'); -ANALYZE analyze_table; +ANALYZE analyze_ftable; -- cleanup DROP FOREIGN TABLE analyze_ftable; diff --git a/contrib/sepgsql/.gitignore b/contrib/sepgsql/.gitignore index b1778d05bbd0b..7e240e44c3692 100644 --- a/contrib/sepgsql/.gitignore +++ b/contrib/sepgsql/.gitignore @@ -3,5 +3,7 @@ /sepgsql-regtest.if /sepgsql-regtest.pp /tmp -# Generated by test suite +# Generated subdirectories +/log/ +/results/ /tmp_check/ diff --git a/contrib/sepgsql/expected/ddl.out b/contrib/sepgsql/expected/ddl.out index 7e8deae4f9320..accb903f5cefc 100644 --- a/contrib/sepgsql/expected/ddl.out +++ b/contrib/sepgsql/expected/ddl.out @@ -304,6 +304,8 @@ ALTER TABLE regtest_table_4 ALTER COLUMN y TYPE float; LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" permissive=0 LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" permissive=0 LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 
tclass=db_schema name="pg_catalog" permissive=0 +LINE 1: ALTER TABLE regtest_table_4 ALTER COLUMN y TYPE float; + ^ LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" permissive=0 LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.y" permissive=0 LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.float8(integer)" permissive=0 @@ -388,7 +390,11 @@ ALTER TABLE regtest_ptable_4 ALTER COLUMN y TYPE float; LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" permissive=0 LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" permissive=0 LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" permissive=0 +LINE 1: ALTER TABLE regtest_ptable_4 ALTER COLUMN y TYPE float; + ^ LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" permissive=0 +LINE 1: ALTER TABLE regtest_ptable_4 ALTER COLUMN y TYPE float; + ^ LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" permissive=0 LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_ptable_4.y" permissive=0 LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" permissive=0 diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c index 23d3f332dbaa7..2820874cb5e97 100644 --- a/contrib/xml2/xpath.c +++ b/contrib/xml2/xpath.c @@ -176,7 +176,7 @@ pgxmlNodeSetToText(xmlNodeSetPtr nodeset, xmlBufferWriteCHAR(buf, toptagname); xmlBufferWriteChar(buf, ">"); } - result = xmlStrdup(buf->content); + result = xmlStrdup(xmlBufferContent(buf)); xmlBufferFree(buf); return result; } diff --git a/doc/src/sgml/amcheck.sgml b/doc/src/sgml/amcheck.sgml index 211a0ae1945bb..0402172a5ff74 100644 --- a/doc/src/sgml/amcheck.sgml +++ b/doc/src/sgml/amcheck.sgml @@ -382,7 +382,7 @@ SET client_min_messages = DEBUG1; verification functions is true, an additional phase of verification is performed against the table associated with the target index relation. This consists of a dummy - CREATE INDEX operation, which checks for the + CREATE INDEX CONCURRENTLY operation, which checks for the presence of all hypothetical new index tuples against a temporary, in-memory summarizing structure (this is built when needed during the basic first phase of verification). 
The summarizing structure diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml index 832b616a7bbff..51b95ed04f399 100644 --- a/doc/src/sgml/client-auth.sgml +++ b/doc/src/sgml/client-auth.sgml @@ -1003,8 +1003,9 @@ local db1,db2,@demodbs all md5 the remainder of the field is treated as a regular expression. (See for details of PostgreSQL's regular expression syntax.) The regular - expression can include a single capture, or parenthesized subexpression, - which can then be referenced in the database-username + expression can include a single capture, or parenthesized subexpression. + The portion of the system user name that matched the capture can then + be referenced in the database-username field as \1 (backslash-one). This allows the mapping of multiple user names in a single line, which is particularly useful for simple syntax substitutions. For example, these entries @@ -1022,12 +1023,11 @@ mymap /^(.*)@otherdomain\.com$ guest If the database-username field starts with a slash (/), the remainder of the field is treated - as a regular expression (see - for details of PostgreSQL's regular - expression syntax). It is not possible to use \1 - to use a capture from regular expression on - system-username for a regular expression - on database-username. + as a regular expression. + When the database-username field is a regular + expression, it is not possible to use \1 within it to + refer to a capture from the system-username + field. diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 59a0874528a3a..945679575e4a3 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -1680,7 +1680,7 @@ include_dir 'conf.d' This parameter determines whether the passphrase command set by ssl_passphrase_command will also be called during a configuration reload if a key file needs a passphrase. If this - parameter is off (the default), then + parameter is off (the default), then ssl_passphrase_command will be ignored during a reload and the SSL configuration will not be reloaded if a passphrase is needed. That setting is appropriate for a command that requires a @@ -1688,6 +1688,12 @@ include_dir 'conf.d' running. Setting this parameter to on might be appropriate if the passphrase is obtained from a file, for example. + + This parameter must be set to on when running on + Windows since all connections + will perform a configuration reload due to the different process model + of that platform. + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -1760,7 +1766,8 @@ include_dir 'conf.d' Controls whether huge pages are requested for the main shared memory area. Valid values are try (the default), - on, and off. With + on, and off. + This parameter can only be set at server start. With huge_pages set to try, the server will try to request huge pages, but fall back to the default if that fails. With on, failure to request huge pages @@ -2273,6 +2280,7 @@ include_dir 'conf.d' platform, is generally discouraged because it typically requires non-default kernel settings to allow for large allocations (see ). + This parameter can only be set at server start. @@ -2300,6 +2308,7 @@ include_dir 'conf.d' however, it may be useful for debugging, when the pg_dynshmem directory is stored on a RAM disk, or when other shared memory facilities are not available. + This parameter can only be set at server start. @@ -2413,6 +2422,7 @@ include_dir 'conf.d' / queue. The default value is 1048576. 
For 8 KB pages it allows to consume up to 8 GB of disk space. + This parameter can only be set at server start. @@ -2694,9 +2704,9 @@ include_dir 'conf.d' Controls the largest I/O size in operations that combine I/O, and silently limits the user-settable parameter io_combine_limit. - This parameter can only be set in - the postgresql.conf file or on the server - command line. + This parameter can only be set at server start. + If this value is specified without units, it is taken as blocks, + that is BLCKSZ bytes, typically 8kB. The maximum possible size depends on the operating system and block size, but is typically 1MB on Unix and 128kB on Windows. The default is 128kB. @@ -2716,6 +2726,8 @@ include_dir 'conf.d' higher than the io_max_combine_limit parameter, the lower value will silently be used instead, so both may need to be raised to increase the I/O size. + If this value is specified without units, it is taken as blocks, + that is BLCKSZ bytes, typically 8kB. The maximum possible size depends on the operating system and block size, but is typically 1MB on Unix and 128kB on Windows. The default is 128kB. @@ -3399,8 +3411,9 @@ include_dir 'conf.d' This parameter enables compression of WAL using the specified compression method. When enabled, the PostgreSQL - server compresses full page images written to WAL when - is on or during a base backup. + server compresses full page images written to WAL (e.g. when + is on, during a base backup, + etc.). A compressed page image will be decompressed during WAL replay. The supported methods are pglz, lz4 (if PostgreSQL @@ -3962,6 +3975,7 @@ include_dir 'conf.d' blocks to prefetch. If this value is specified without units, it is taken as bytes. The default is 512kB. + This parameter can only be set at server start. @@ -4618,10 +4632,12 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows - Invalidate replication slots that have remained idle longer than this - duration. If this value is specified without units, it is taken as - minutes. A value of zero (the default) disables the idle timeout - invalidation mechanism. This parameter can only be set in the + Invalidate replication slots that have remained inactive (not used by + a replication connection) + for longer than this duration. + If this value is specified without units, it is taken as seconds. + A value of zero (the default) disables the idle timeout + invalidation mechanism. This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4685,9 +4701,9 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows - Record commit time of transactions. This parameter - can only be set in postgresql.conf file or on the server - command line. The default value is off. + Record commit time of transactions. + This parameter can only be set at server start. + The default value is off. @@ -5904,24 +5920,24 @@ ANY num_sync ( - - PQservicePQservice - - - - Returns the service of the active connection. - - -char *PQservice(const PGconn *conn); - - - - - returns NULL if the - conn argument is NULL. - Otherwise, if there was no service provided, it returns an empty string. 
- - - - PQttyPQtty diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml index c32e6bc000d4d..adb676cb90060 100644 --- a/doc/src/sgml/logical-replication.sgml +++ b/doc/src/sgml/logical-replication.sgml @@ -709,8 +709,8 @@ HINT: To initiate replication, you must manually create the replication slot, e - To confirm that the standby server is indeed ready for failover, follow these - steps to verify that all necessary logical replication slots have been + To confirm that the standby server is indeed ready for failover for a given subscriber, follow these + steps to verify that all the logical replication slots required by that subscriber have been synchronized to the standby server: @@ -764,7 +764,7 @@ HINT: To initiate replication, you must manually create the replication slot, e Check that the logical replication slots identified above exist on the standby server and are ready for failover. -/* standby # */ SELECT slot_name, (synced AND NOT temporary AND NOT conflicting) AS failover_ready +/* standby # */ SELECT slot_name, (synced AND NOT temporary AND invalidation_reason IS NULL) AS failover_ready FROM pg_replication_slots WHERE slot_name IN ('sub1','sub2','sub3', 'pg_16394_sync_16385_7394666715149055164'); @@ -782,10 +782,42 @@ HINT: To initiate replication, you must manually create the replication slot, e If all the slots are present on the standby server and the result (failover_ready) of the above SQL query is true, then - existing subscriptions can continue subscribing to publications now on the - new primary server. + existing subscriptions can continue subscribing to publications on the new + primary server. + + + + The first two steps in the above procedure are meant for a + PostgreSQL subscriber. It is recommended to run + these steps on each subscriber node, that will be served by the designated + standby after failover, to obtain the complete list of replication + slots. This list can then be verified in Step 3 to ensure failover readiness. + Non-PostgreSQL subscribers, on the other hand, may + use their own methods to identify the replication slots used by their + respective subscriptions. + + + + In some cases, such as during a planned failover, it is necessary to confirm + that all subscribers, whether PostgreSQL or + non-PostgreSQL, will be able to continue + replication after failover to a given standby server. In such cases, use the + following SQL, instead of performing the first two steps above, to identify + which replication slots on the primary need to be synced to the standby that + is intended for promotion. This query returns the relevant replication slots + associated with all the failover-enabled subscriptions. + + +/* primary # */ SELECT array_agg(quote_literal(r.slot_name)) AS slots + FROM pg_replication_slots r + WHERE r.failover AND NOT r.temporary; + slots +------- + {'sub1','sub2','sub3', 'pg_16394_sync_16385_7394666715149055164'} +(1 row) + @@ -1016,28 +1048,28 @@ HINT: To initiate replication, you must manually create the replication slot, e defined) for each publication. 
5) AND (c = 'NSW'::text)) + "public.t1" WHERE ((a > 5) AND (c = 'NSW'::text)) - Publication p2 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root -----------+------------+---------+---------+---------+-----------+---------- - postgres | f | t | t | t | t | f + Publication p2 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root +----------+------------+---------+---------+---------+-----------+-------------------+---------- + postgres | f | t | t | t | t | none | f Tables: - "public.t1" - "public.t2" WHERE (e = 99) + "public.t1" + "public.t2" WHERE (e = 99) - Publication p3 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root -----------+------------+---------+---------+---------+-----------+---------- - postgres | f | t | t | t | t | f + Publication p3 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root +----------+------------+---------+---------+---------+-----------+-------------------+---------- + postgres | f | t | t | t | t | none | f Tables: - "public.t2" WHERE (d = 10) - "public.t3" WHERE (g = 10) + "public.t2" WHERE (d = 10) + "public.t3" WHERE (g = 10) ]]> @@ -1459,10 +1491,10 @@ Publications: for each publication. /* pub # */ \dRp+ - Publication p1 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root -----------+------------+---------+---------+---------+-----------+---------- - postgres | f | t | t | t | t | f + Publication p1 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root +----------+------------+---------+---------+---------+-----------+-------------------+---------- + postgres | f | t | t | t | t | none | f Tables: "public.t1" (id, a, b, d) @@ -1776,7 +1808,7 @@ Publications: update_missing - The tuple to be updated was not found. The update will simply be + The row to be updated was not found. The update will simply be skipped in this scenario. @@ -1797,7 +1829,7 @@ Publications: delete_missing - The tuple to be deleted was not found. The delete will simply be + The row to be deleted was not found. The delete will simply be skipped in this scenario. @@ -1831,8 +1863,8 @@ DETAIL: detailed_explanation. where detail_values is one of: Key (column_name , ...)=(column_value , ...) - existing local tuple (column_name , ...)=(column_value , ...) - remote tuple (column_name , ...)=(column_value , ...) + existing local row (column_name , ...)=(column_value , ...) + remote row (column_name , ...)=(column_value , ...) replica identity {(column_name , ...)=(column_value , ...) | full (column_name , ...)=(column_value , ...)} @@ -1866,32 +1898,32 @@ DETAIL: detailed_explanation. detailed_explanation includes the origin, transaction ID, and commit timestamp of the transaction that - modified the existing local tuple, if available. + modified the existing local row, if available. The Key section includes the key values of the local - tuple that violated a unique constraint for + row that violated a unique constraint for insert_exists, update_exists or multiple_unique_conflicts conflicts. 
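As a sketch of the new "Generated columns" publication attribute shown in the \dRp+ output above (the table and publication names here are hypothetical, and publish_generated_columns is assumed to accept the values 'none' and 'stored'):

    -- hypothetical names; illustrates the attribute shown in \dRp+ above
    CREATE TABLE t_gen (a int, b int GENERATED ALWAYS AS (a * 2) STORED);
    CREATE PUBLICATION p_demo FOR TABLE t_gen
        WITH (publish_generated_columns = 'stored');
    \dRp+ p_demo    -- "Generated columns" should read "stored" here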
- The existing local tuple section includes the local - tuple if its origin differs from the remote tuple for + The existing local row section includes the local + row if its origin differs from the remote row for update_origin_differs or delete_origin_differs - conflicts, or if the key value conflicts with the remote tuple for + conflicts, or if the key value conflicts with the remote row for insert_exists, update_exists or multiple_unique_conflicts conflicts. - The remote tuple section includes the new tuple from + The remote row section includes the new row from the remote insert or update operation that caused the conflict. Note that - for an update operation, the column value of the new tuple will be null + for an update operation, the column value of the new row will be null if the value is unchanged and toasted. @@ -1899,7 +1931,7 @@ DETAIL: detailed_explanation. The replica identity section includes the replica identity key values that were used to search for the existing local - tuple to be updated or deleted. This may include the full tuple value + row to be updated or deleted. This may include the full row value if the local relation is marked with REPLICA IDENTITY FULL. @@ -1907,7 +1939,7 @@ DETAIL: detailed_explanation. column_name is the column name. - For existing local tuple, remote tuple, + For existing local row, remote row, and replica identity full cases, column names are logged only if the user lacks the privilege to access all columns of the table. If column names are present, they appear in the same order @@ -1964,7 +1996,7 @@ DETAIL: detailed_explanation. ERROR: conflict detected on relation "public.test": conflict=insert_exists DETAIL: Key already exists in unique index "t_pkey", which was modified locally in transaction 740 at 2024-06-26 10:47:04.727375+08. -Key (c)=(1); existing local tuple (1, 'local'); remote tuple (1, 'remote'). +Key (c)=(1); existing local row (1, 'local'); remote row (1, 'remote'). CONTEXT: processing remote data for replication origin "pg_16395" during "INSERT" for replication target relation "public.test" in transaction 725 finished at 0/14C0378 The LSN of the transaction that contains the change violating the constraint and diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml index fc288d691b9f6..e84bfbce71ac2 100644 --- a/doc/src/sgml/logicaldecoding.sgml +++ b/doc/src/sgml/logicaldecoding.sgml @@ -290,7 +290,7 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU A logical slot will emit each change just once in normal operation. The current position of each slot is persisted only at checkpoint, so in - the case of a crash the slot may return to an earlier LSN, which will + the case of a crash the slot might return to an earlier LSN, which will then cause recent changes to be sent again when the server restarts. Logical decoding clients are responsible for avoiding ill effects from handling the same message more than once. Clients may wish to record @@ -409,7 +409,7 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU should be used with caution. Unlike automatic synchronization, it does not include cyclic retries, making it more prone to synchronization failures, particularly during initial sync scenarios where the required WAL files - or catalog rows for the slot may have already been removed or are at risk + or catalog rows for the slot might have already been removed or are at risk of being removed on the standby. 
In contrast, automatic synchronization via sync_replication_slots provides continuous slot updates, enabling seamless failover and supporting high availability. @@ -420,18 +420,18 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU When slot synchronization is configured as recommended, and the initial synchronization is performed either automatically or - manually via pg_sync_replication_slot, the standby can persist the - synchronized slot only if the following condition is met: The logical - replication slot on the primary must retain WALs and system catalog - rows that are still available on the standby. This ensures data + manually via pg_sync_replication_slots, the standby + can persist the synchronized slot only if the following condition is met: + The logical replication slot on the primary must retain WALs and system + catalog rows that are still available on the standby. This ensures data integrity and allows logical replication to continue smoothly after promotion. If the required WALs or catalog rows have already been purged from the standby, the slot will not be persisted to avoid data loss. In such cases, the following log message may appear: - LOG: could not synchronize replication slot "failover_slot" - DETAIL: Synchronization could lead to data loss as the remote slot needs WAL at LSN 0/3003F28 and catalog xmin 754, but the standby has LSN 0/3003F28 and catalog xmin 756 +LOG: could not synchronize replication slot "failover_slot" +DETAIL: Synchronization could lead to data loss, because the remote slot needs WAL at LSN 0/3003F28 and catalog xmin 754, but the standby has LSN 0/3003F28 and catalog xmin 756. If the logical replication slot is actively used by a consumer, no manual intervention is needed; the slot will advance automatically, diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml index 600e4b3f2f3b8..1571a895fc812 100644 --- a/doc/src/sgml/maintenance.sgml +++ b/doc/src/sgml/maintenance.sgml @@ -779,7 +779,10 @@ HINT: Execute a database-wide VACUUM in that database. careful aging management, storage cleanup, and wraparound handling. There is a separate storage area which holds the list of members in each multixact, which also uses a 32-bit counter and which must also - be managed. + be managed. The system function + pg_get_multixact_members() described in + can be used to examine the + transaction IDs associated with a multixact ID. @@ -930,12 +933,16 @@ vacuum threshold = Minimum(vacuum max threshold, vacuum base threshold + vacuum The table is also vacuumed if the number of tuples inserted since the last vacuum has exceeded the defined insert threshold, which is defined as: -vacuum insert threshold = vacuum base insert threshold + vacuum insert scale factor * number of tuples +vacuum insert threshold = vacuum base insert threshold + vacuum insert scale factor * number of tuples * percent of table not frozen where the vacuum insert base threshold is , - and vacuum insert scale factor is - . + the vacuum insert scale factor is + , + the number of tuples is + pg_class.reltuples, + and the percent of the table not frozen is + 1 - pg_class.relallfrozen / pg_class.relpages. Such vacuums may allow portions of the table to be marked as all visible and also allow tuples to be frozen, which can reduce the work required in subsequent vacuums. 
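The revised insert-vacuum threshold formula above can be estimated directly from pg_class. A minimal sketch, assuming the default base threshold of 1000 and insert scale factor of 0.2 (the table name is hypothetical):

    -- approximates: base + scale factor * reltuples * fraction-not-frozen
    SELECT reltuples,
           relallfrozen,
           relpages,
           1000 + 0.2 * reltuples
                * (1 - relallfrozen::float8 / greatest(relpages, 1))
               AS approx_vacuum_insert_threshold
    FROM pg_class
    WHERE relname = 'my_table';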
diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 4265a22d4de35..5ac0e54688cb4 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -3980,6 +3980,7 @@ description | Waiting for a newly initialized WAL file to reach durable storage Estimated number of rows inserted since this table was last vacuumed + (not counting VACUUM FULL) @@ -4066,7 +4067,8 @@ description | Waiting for a newly initialized WAL file to reach durable storage total_vacuum_time double precision - Total time this table has been manually vacuumed, in milliseconds. + Total time this table has been manually vacuumed, in milliseconds + (not counting VACUUM FULL). (This includes the time spent sleeping due to cost-based delays.) @@ -5649,7 +5651,7 @@ FROM pg_stat_get_backend_idset() AS backendid; Total time spent sleeping due to cost-based delay (see - , in milliseconds + ), in milliseconds (if is enabled, otherwise zero). diff --git a/doc/src/sgml/pgbuffercache.sgml b/doc/src/sgml/pgbuffercache.sgml index 537d601494242..6817cf5d21582 100644 --- a/doc/src/sgml/pgbuffercache.sgml +++ b/doc/src/sgml/pgbuffercache.sgml @@ -19,10 +19,18 @@ pg_buffercache_pages + + pg_buffercache_numa + + pg_buffercache_summary + + pg_buffercache_usage_counts + + pg_buffercache_evict @@ -37,12 +45,12 @@ This module provides the pg_buffercache_pages() - function (wrapped in the pg_buffercache view), + function (wrapped in the pg_buffercache view), the pg_buffercache_numa_pages() function (wrapped in the pg_buffercache_numa view), the pg_buffercache_summary() function, the pg_buffercache_usage_counts() function, the - pg_buffercache_evict(), the + pg_buffercache_evict() function, the pg_buffercache_evict_relation() function and the pg_buffercache_evict_all() function. @@ -55,7 +63,7 @@ - The pg_buffercache_numa_pages() provides + The pg_buffercache_numa_pages() function provides NUMA node mappings for shared buffer entries. This information is not part of pg_buffercache_pages() itself, as it is much slower to retrieve. @@ -489,7 +497,7 @@ - The <structname>pg_buffercache_evict_relation</structname> Function + The <function>pg_buffercache_evict_relation()</function> Function The pg_buffercache_evict_relation() function is very similar to the pg_buffercache_evict() function. The @@ -507,7 +515,7 @@ - The <structname>pg_buffercache_evict_all</structname> Function + The <function>pg_buffercache_evict_all()</function> Function The pg_buffercache_evict_all() function is very similar to the pg_buffercache_evict() function. The diff --git a/doc/src/sgml/pgoverexplain.sgml b/doc/src/sgml/pgoverexplain.sgml index 21930fbd3bd76..377ddc8139ecf 100644 --- a/doc/src/sgml/pgoverexplain.sgml +++ b/doc/src/sgml/pgoverexplain.sgml @@ -8,7 +8,7 @@ - The pg_overexplain extends EXPLAIN + The pg_overexplain module extends EXPLAIN with new options that provide additional output. It is mostly intended to assist with debugging of and development of the planner, rather than for general use. Since this module displays internal details of planner data @@ -17,6 +17,21 @@ often as) those data structures change. + + To use it, simply load it into the server. You can load it into an + individual session: + + +LOAD 'pg_overexplain'; + + + You can also preload it into some or all sessions by including + pg_overexplain in + or + in + postgresql.conf. 
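After loading the module as described above, the new EXPLAIN options become available in that session; a minimal sketch:

    LOAD 'pg_overexplain';
    EXPLAIN (DEBUG) SELECT 1;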
+ + EXPLAIN (DEBUG) diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml index e937491e6b89e..5492a66f7eedc 100644 --- a/doc/src/sgml/plpgsql.sgml +++ b/doc/src/sgml/plpgsql.sgml @@ -4962,13 +4962,13 @@ $$ LANGUAGE plpgsql; Variable substitution currently works only in SELECT, INSERT, UPDATE, - DELETE, and commands containing one of - these (such as EXPLAIN and CREATE TABLE - ... AS SELECT), - because the main SQL engine allows query parameters only in these - commands. To use a non-constant name or value in other statement - types (generically called utility statements), you must construct - the utility statement as a string and EXECUTE it. + DELETE, MERGE and commands + containing one of these (such as EXPLAIN and + CREATE TABLE ... AS SELECT), because the main SQL + engine allows query parameters only in these commands. To use a + non-constant name or value in other statement types (generically called + utility statements), you must construct the utility statement as a string + and EXECUTE it. diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index 82fe3f93761dc..f0b29ed8cab9f 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -537,6 +537,11 @@ The frontend should not respond to this message, but should continue listening for a ReadyForQuery message. + + The PostgreSQL server will always send this + message, but some third party backend implementations of the protocol + that don't support query cancellation are known not to. + @@ -886,6 +891,16 @@ SELCT 1/0; Errors detected at semantic analysis or later, such as a misspelled table or column name, do not have this effect. + + + Lastly, note that all the statements within the Query message will + observe the same value of statement_timestamp(), + since that timestamp is updated only upon receipt of the Query + message. This will result in them all observing the same + value of transaction_timestamp() as well, + except in cases where the query string ends a previously-started + transaction and begins a new one. + @@ -3504,11 +3519,13 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" - Boolean option to enable streaming of in-progress transactions. - It accepts an additional value "parallel" to enable sending extra - information with some messages to be used for parallelisation. - Minimum protocol version 2 is required to turn it on. Minimum protocol - version 4 is required for the "parallel" option. + Option to enable streaming of in-progress transactions. Valid values are + off (the default), on and + parallel. The setting parallel + enables sending extra information with some messages to be used for + parallelization. Minimum protocol version 2 is required to turn it + on. Minimum protocol version 4 is required for the + parallel value. @@ -4146,7 +4163,7 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" message, indicated by the length field. - The maximum key length is 256 bytes. The + The minimum and maximum key length are 4 and 256 bytes, respectively. The PostgreSQL server only sends keys up to 32 bytes, but the larger maximum size allows for future server versions, as well as connection poolers and other middleware, to use @@ -4337,7 +4354,7 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" - Int32(16) + Int32 Length of message contents in bytes, including self. @@ -6081,13 +6098,13 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" - Int32(196608) + Int32(196610) The protocol version number. 
The most significant 16 bits are the major version number (3 for the protocol described here). The least significant 16 bits are the minor version number - (0 for the protocol described here). + (2 for the protocol described here). diff --git a/doc/src/sgml/query.sgml b/doc/src/sgml/query.sgml index 727a0cb185fb2..b190f28d41ea6 100644 --- a/doc/src/sgml/query.sgml +++ b/doc/src/sgml/query.sgml @@ -264,8 +264,18 @@ COPY weather FROM '/home/user/weather.txt'; where the file name for the source file must be available on the machine running the backend process, not the client, since the backend process - reads the file directly. You can read more about the - COPY command in . + reads the file directly. The data inserted above into the weather table + could also be inserted from a file containing (values are separated by a + tab character): + + +San Francisco 46 50 0.25 1994-11-27 +San Francisco 43 57 0.0 1994-11-29 +Hayward 37 54 \N 1994-11-29 + + + You can read more about the COPY command in + . diff --git a/doc/src/sgml/ref/alter_publication.sgml b/doc/src/sgml/ref/alter_publication.sgml index d5ea383e8bc95..d7f1a5a722941 100644 --- a/doc/src/sgml/ref/alter_publication.sgml +++ b/doc/src/sgml/ref/alter_publication.sgml @@ -23,15 +23,24 @@ PostgreSQL documentation ALTER PUBLICATION name ADD publication_object [, ...] ALTER PUBLICATION name SET publication_object [, ...] -ALTER PUBLICATION name DROP publication_object [, ...] +ALTER PUBLICATION name DROP publication_drop_object [, ...] ALTER PUBLICATION name SET ( publication_parameter [= value] [, ... ] ) ALTER PUBLICATION name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER PUBLICATION name RENAME TO new_name where publication_object is one of: - TABLE [ ONLY ] table_name [ * ] [ ( column_name [, ... ] ) ] [ WHERE ( expression ) ] [, ... ] + TABLE table_and_columns [, ... ] TABLES IN SCHEMA { schema_name | CURRENT_SCHEMA } [, ... ] + +and publication_drop_object is one of: + + TABLE [ ONLY ] table_name [ * ] [, ... ] + TABLES IN SCHEMA { schema_name | CURRENT_SCHEMA } [, ... ] + +and table_and_columns is: + + [ ONLY ] table_name [ * ] [ ( column_name [, ... ] ) ] [ WHERE ( expression ) ] @@ -57,8 +66,7 @@ ALTER PUBLICATION name RENAME TO DROP TABLES IN SCHEMA will not drop any schema tables that were specified using FOR TABLE/ - ADD TABLE, and the combination of DROP - with a WHERE clause is not allowed. + ADD TABLE. diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml index d16969916835d..2a7d4b84f83c6 100644 --- a/doc/src/sgml/ref/alter_table.sgml +++ b/doc/src/sgml/ref/alter_table.sgml @@ -210,6 +210,8 @@ WITH ( MODULUS numeric_literal, REM When this form is used, the column's statistics are removed, so running ANALYZE on the table afterwards is recommended. + For a virtual generated column, ANALYZE + is not necessary because such columns never have statistics. @@ -240,9 +242,10 @@ WITH ( MODULUS numeric_literal, REM provided none of the records in the table contain a NULL value for the column. Ordinarily this is checked during the ALTER TABLE by scanning the - entire table; however, if a valid CHECK constraint is - found which proves no NULL can exist, then the - table scan is skipped. + entire table, unless NOT VALID is specified; + however, if a valid CHECK constraint exists + (and is not dropped in the same command) which proves no + NULL can exist, then the table scan is skipped. If a column has an invalid not-null constraint, SET NOT NULL validates it. 
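The NOT VALID path described above can be exercised as follows; a hedged sketch with hypothetical table and constraint names, where ADD CONSTRAINT skips the full-table scan and the scan is deferred to validation time:

    ALTER TABLE t ADD CONSTRAINT t_a_not_null NOT NULL a NOT VALID;
    -- later, either of these validates the constraint by scanning the table:
    ALTER TABLE t VALIDATE CONSTRAINT t_a_not_null;
    -- or, per the text above, SET NOT NULL validates an invalid
    -- not-null constraint:
    ALTER TABLE t ALTER COLUMN a SET NOT NULL;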
@@ -270,6 +273,15 @@ WITH ( MODULUS numeric_literal, REM in a stored generated column is rewritten and all the future changes will apply the new generation expression. + + + When this form is used on a stored generated column, its statistics + are removed, so running + ANALYZE + on the table afterwards is recommended. + For a virtual generated column, ANALYZE + is not necessary because such columns never have statistics. + @@ -364,24 +376,22 @@ WITH ( MODULUS numeric_literal, REM n_distinct_inherited, which override the number-of-distinct-values estimates made by subsequent ANALYZE - operations. n_distinct affects the statistics for the table - itself, while n_distinct_inherited affects the statistics - gathered for the table plus its inheritance children. When set to a - positive value, ANALYZE will assume that the column contains - exactly the specified number of distinct nonnull values. When set to a - negative value, which must be greater - than or equal to -1, ANALYZE will assume that the number of - distinct nonnull values in the column is linear in the size of the - table; the exact count is to be computed by multiplying the estimated - table size by the absolute value of the given number. For example, - a value of -1 implies that all values in the column are distinct, while - a value of -0.5 implies that each value appears twice on the average. - This can be useful when the size of the table changes over time, since - the multiplication by the number of rows in the table is not performed - until query planning time. Specify a value of 0 to revert to estimating - the number of distinct values normally. For more information on the use - of statistics by the PostgreSQL query - planner, refer to . + operations. n_distinct affects the statistics for the + table itself, while n_distinct_inherited affects the + statistics gathered for the table plus its inheritance children, and for + the statistics gathered for partitioned tables. When the value + specified is a positive value, the query planner will assume that the + column contains exactly the specified number of distinct nonnull values. + Fractional values may also be specified by using values below 0 and + above or equal to -1. This instructs the query planner to estimate the + number of distinct values by multiplying the absolute value of the + specified number by the estimated number of rows in the table. For + example, a value of -1 implies that all values in the column are + distinct, while a value of -0.5 implies that each value appears twice on + average. This can be useful when the size of the table changes over + time. For more information on the use of statistics by the + PostgreSQL query planner, refer to + . Changing per-attribute options acquires a diff --git a/doc/src/sgml/ref/create_policy.sgml b/doc/src/sgml/ref/create_policy.sgml index e76c342d3da67..42d43ad7bf414 100644 --- a/doc/src/sgml/ref/create_policy.sgml +++ b/doc/src/sgml/ref/create_policy.sgml @@ -49,6 +49,8 @@ CREATE POLICY name ON WITH CHECK. When a USING expression returns true for a given row then that row is visible to the user, while if false or null is returned then the row is not visible. + Typically, no error occurs when a row is not visible, but see + for exceptions. When a WITH CHECK expression returns true for a row then that row is inserted or updated, while if false or null is returned then an error occurs. @@ -194,8 +196,9 @@ CREATE POLICY name ON SELECT), and will not be available for modification (in an UPDATE - or DELETE). 
Such rows are silently suppressed; no error - is reported. + or DELETE). Typically, such rows are silently + suppressed; no error is reported (but see + for exceptions). @@ -251,8 +254,10 @@ CREATE POLICY name ON INSERT or UPDATE command attempts to add rows to the table that do not pass the ALL - policy's WITH CHECK expression, the entire - command will be aborted. + policy's WITH CHECK expression (or its + USING expression, if it does not have a + WITH CHECK expression), the entire command will + be aborted. @@ -268,11 +273,50 @@ CREATE POLICY name ON SELECT policy will be returned during a SELECT query, and that queries that require SELECT permissions, such as - UPDATE, will also only see those records + UPDATE, DELETE, and + MERGE, will also only see those records that are allowed by the SELECT policy. A SELECT policy cannot have a WITH CHECK expression, as it only applies in cases where - records are being retrieved from the relation. + records are being retrieved from the relation, except as described + below. + + + If a data-modifying query has a RETURNING clause, + SELECT permissions are required on the relation, + and any newly inserted or updated rows from the relation must satisfy + the relation's SELECT policies in order to be + available to the RETURNING clause. If a newly + inserted or updated row does not satisfy the relation's + SELECT policies, an error will be thrown (inserted + or updated rows to be returned are never + silently ignored). + + + If an INSERT has an ON CONFLICT DO + NOTHING/UPDATE clause, SELECT + permissions are required on the relation, and the rows proposed for + insertion are checked using the relation's SELECT + policies. If a row proposed for insertion does not satisfy the + relation's SELECT policies, an error is thrown + (the INSERT is never silently + avoided). In addition, if the UPDATE path is + taken, the row to be updated and the new updated row are checked + against the relation's SELECT policies, and an + error is thrown if they are not satisfied (an auxiliary + UPDATE is never silently + avoided). + + + A MERGE command requires SELECT + permissions on both the source and target relations, and so each + relation's SELECT policies are applied before they + are joined, and the MERGE actions will only see + those records that are allowed by those policies. In addition, if + an UPDATE action is executed, the target relation's + SELECT policies are applied to the updated row, as + for a standalone UPDATE, except that an error is + thrown if they are not satisfied. @@ -292,10 +336,11 @@ CREATE POLICY name ON - Note that INSERT with ON CONFLICT DO - UPDATE checks INSERT policies' - WITH CHECK expressions only for rows appended - to the relation by the INSERT path. + Note that an INSERT with an ON CONFLICT + DO NOTHING/UPDATE clause will check the + INSERT policies' WITH CHECK + expressions for all rows proposed for insertion, regardless of + whether or not they end up being inserted. @@ -305,12 +350,12 @@ CREATE POLICY name ON Using UPDATE for a policy means that it will apply - to UPDATE, SELECT FOR UPDATE + to UPDATE, SELECT FOR UPDATE, and SELECT FOR SHARE commands, as well as auxiliary ON CONFLICT DO UPDATE clauses of - INSERT commands. - MERGE commands containing UPDATE - actions are affected as well. Since UPDATE + INSERT commands, and MERGE + commands containing UPDATE actions. 
+ Since an UPDATE command involves pulling an existing record and replacing it with a new modified record, UPDATE policies accept both a USING expression and @@ -356,7 +401,8 @@ CREATE POLICY name ON USING expressions, an error will be thrown (the UPDATE path will never be silently - avoided). + avoided). The same applies to an UPDATE action + of a MERGE command. @@ -366,12 +412,18 @@ CREATE POLICY name ON Using DELETE for a policy means that it will apply - to DELETE commands. Only rows that pass this - policy will be seen by a DELETE command. There can - be rows that are visible through a SELECT that are - not available for deletion, if they do not pass the - USING expression for - the DELETE policy. + to DELETE commands and MERGE + commands containing DELETE actions. For a + DELETE command, only rows that pass this policy + will be seen by the DELETE command. There can + be rows that are visible through a SELECT policy + that are not available for deletion, if they do not pass the + USING expression for the DELETE + policy. Note, however, that a DELETE action in a + MERGE command will see rows that are visible + through SELECT policies, and if the + DELETE policy does not pass for such a row, an + error will be thrown. @@ -400,6 +452,15 @@ CREATE POLICY name ON + + summarizes how the different + types of policy apply to specific commands. In the table, + check means that the policy expression is checked and an + error is thrown if it returns false or null, whereas filter + means that the row is silently ignored if the policy expression returns + false or null. + + Policies Applied by Command Type @@ -424,8 +485,8 @@ CREATE POLICY name ON - SELECT - Existing row + SELECT / COPY ... TO + Filter existing row @@ -433,63 +494,117 @@ CREATE POLICY name ON SELECT FOR UPDATE/SHARE - Existing row + Filter existing row - Existing row + Filter existing row - INSERT / MERGE ... THEN INSERT + INSERT + + Check new row  + + If read access is required to either the existing or new row (for + example, a WHERE or RETURNING + clause that refers to columns from the relation). + + + + Check new row - New row + + + UPDATE + + Filter existing row  & + check new row  + + + Filter existing row + Check new row - INSERT ... RETURNING + DELETE - New row + Filter existing row  + + + + + Filter existing row + + + INSERT ... ON CONFLICT + + Check new row  - If read access is required to the existing or new row (for example, - a WHERE or RETURNING clause - that refers to columns from the relation). + Row proposed for insertion is checked regardless of whether or not a + conflict occurs. - New row + + Check new row  + - UPDATE / MERGE ... THEN UPDATE + ON CONFLICT DO UPDATE - Existing & new rows + Check existing & new rows  + + New row of the auxiliary UPDATE command, which + might be different from the new row of the original + INSERT command. + + - Existing row - New row + Check existing row + + Check new row  + - DELETE + MERGE + Filter source & target rows + + + + + + + MERGE ... THEN INSERT - Existing row + Check new row  + Check new row - Existing row - ON CONFLICT DO UPDATE - Existing & new rows + MERGE ... THEN UPDATE + Check new row + + Check existing row + Check new row + + + + MERGE ... 
THEN DELETE + + - Existing row - New row + Check existing row diff --git a/doc/src/sgml/ref/create_publication.sgml b/doc/src/sgml/ref/create_publication.sgml index 802630f2df116..092a141dd87b3 100644 --- a/doc/src/sgml/ref/create_publication.sgml +++ b/doc/src/sgml/ref/create_publication.sgml @@ -28,8 +28,12 @@ CREATE PUBLICATION namewhere publication_object is one of: - TABLE [ ONLY ] table_name [ * ] [ ( column_name [, ... ] ) ] [ WHERE ( expression ) ] [, ... ] + TABLE table_and_columns [, ... ] TABLES IN SCHEMA { schema_name | CURRENT_SCHEMA } [, ... ] + +and table_and_columns is: + + [ ONLY ] table_name [ * ] [ ( column_name [, ... ] ) ] [ WHERE ( expression ) ] diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index dc000e913c143..927d541781451 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -1379,8 +1379,8 @@ WITH ( MODULUS numeric_literal, REM REFERENCES (foreign key) constraints accept this clause. NOT NULL and CHECK constraints are not deferrable. Note that deferrable constraints cannot be used as - conflict arbitrators in an INSERT statement that - includes an ON CONFLICT DO UPDATE clause. + conflict arbiters in an INSERT statement that + includes an ON CONFLICT clause. diff --git a/doc/src/sgml/ref/create_trigger.sgml b/doc/src/sgml/ref/create_trigger.sgml index 982ab6f3ee450..71869c996e3dc 100644 --- a/doc/src/sgml/ref/create_trigger.sgml +++ b/doc/src/sgml/ref/create_trigger.sgml @@ -197,9 +197,11 @@ CREATE [ OR REPLACE ] [ CONSTRAINT ] TRIGGER name of the rows inserted, deleted, or modified by the current SQL statement. This feature lets the trigger see a global view of what the statement did, not just one row at a time. This option is only allowed for - an AFTER trigger that is not a constraint trigger; also, if - the trigger is an UPDATE trigger, it must not specify - a column_name list. + an AFTER trigger on a plain table (not a foreign table). + The trigger should not be a constraint trigger. Also, if the trigger is + an UPDATE trigger, it must not specify + a column_name list when using + this option. OLD TABLE may only be specified once, and only for a trigger that can fire on UPDATE or DELETE; it creates a transition relation containing the before-images of all rows diff --git a/doc/src/sgml/ref/drop_owned.sgml b/doc/src/sgml/ref/drop_owned.sgml index 46e1c229ec0fb..efda01a39e88b 100644 --- a/doc/src/sgml/ref/drop_owned.sgml +++ b/doc/src/sgml/ref/drop_owned.sgml @@ -33,7 +33,7 @@ DROP OWNED BY { name | CURRENT_ROLE database that are owned by one of the specified roles. Any privileges granted to the given roles on objects in the current database or on shared objects (databases, tablespaces, configuration - parameters, or other roles) will also be revoked. + parameters) will also be revoked. diff --git a/doc/src/sgml/ref/insert.sgml b/doc/src/sgml/ref/insert.sgml index 3f13991779050..b337f2ee55586 100644 --- a/doc/src/sgml/ref/insert.sgml +++ b/doc/src/sgml/ref/insert.sgml @@ -594,6 +594,15 @@ INSERT INTO table_name [ AS + + + While CREATE INDEX CONCURRENTLY or REINDEX + CONCURRENTLY is running on a unique index, INSERT + ... ON CONFLICT statements on the same table may unexpectedly + fail with a unique violation. 
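Returning to the policy behavior summarized in the table above, the check-versus-filter distinction for RETURNING can be demonstrated with a minimal sketch (all names hypothetical; run the INSERT as a role subject to row-level security):

    CREATE TABLE accounts (id int, owner text, balance int);
    ALTER TABLE accounts ENABLE ROW LEVEL SECURITY;
    CREATE POLICY accounts_select ON accounts FOR SELECT
        USING (owner = current_user);
    CREATE POLICY accounts_insert ON accounts FOR INSERT
        WITH CHECK (true);
    -- the insert itself passes the INSERT policy, but returning a row
    -- that fails the SELECT policy is expected to raise an error rather
    -- than be silently suppressed:
    INSERT INTO accounts VALUES (1, 'someone_else', 0) RETURNING *;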
+ + + diff --git a/doc/src/sgml/ref/pg_combinebackup.sgml b/doc/src/sgml/ref/pg_combinebackup.sgml index 330a598f7013e..9a6d201e0b8e3 100644 --- a/doc/src/sgml/ref/pg_combinebackup.sgml +++ b/doc/src/sgml/ref/pg_combinebackup.sgml @@ -314,7 +314,7 @@ PostgreSQL documentation To avoid this problem, taking a new full backup after changing the checksum - state of the cluster using is + state of the cluster using is recommended. Otherwise, you can disable and then optionally reenable checksums on the directory produced by pg_combinebackup in order to correct the problem. diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml index 2ae084b5fa6fc..f1b076ec46eb9 100644 --- a/doc/src/sgml/ref/pg_dump.sgml +++ b/doc/src/sgml/ref/pg_dump.sgml @@ -96,6 +96,18 @@ PostgreSQL documentation light of the limitations listed below. + + + Restoring a dump causes the destination to execute arbitrary code of the + source superusers' choice. Partial dumps and partial restores do not limit + that. If the source superusers are not trusted, the dumped SQL statements + must be inspected before restoring. Non-plain-text dumps can be inspected + by using pg_restore's + option. Note that the client running the dump and restore need not trust + the source or destination superusers. + + + @@ -1252,6 +1264,29 @@ PostgreSQL documentation + + + + + Use the provided string as the psql + \restrict key in the dump output. This can only be + specified for plain-text dumps, i.e., when is + set to plain or the option + is omitted. If no restrict key is specified, + pg_dump will generate a random one as + needed. Keys may contain only alphanumeric characters. + + + This option is primarily intended for testing purposes and other + scenarios that require repeatable output (e.g., comparing dump files). + It is not recommended for general use, as a malicious server with + advance knowledge of the key may be able to inject arbitrary code that + will be executed on the machine that runs + psql with the dump output. + + + + @@ -1354,12 +1389,21 @@ PostgreSQL documentation + + + + + Dump optimizer statistics. + + + + Dump only the statistics, not the schema (data definitions) or data. - Statistics for tables, materialized views, foreign tables, + Optimizer statistics for tables, materialized views, foreign tables, and indexes are dumped. @@ -1440,33 +1484,6 @@ PostgreSQL documentation - - - - - Dump data. This is the default. - - - - - - - - - Dump schema (data definitions). This is the default. - - - - - - - - - Dump statistics. - - - - @@ -1682,11 +1699,12 @@ CREATE DATABASE foo WITH TEMPLATE template0; - If is specified, + When is specified, pg_dump will include most optimizer statistics in the - resulting dump file. However, some statistics may not be included, such as - those created explicitly with or - custom statistics added by an extension. Therefore, it may be useful to + resulting dump file. This does not include all statistics, such as + those created explicitly with , + custom statistics added by an extension, or statistics collected by the + cumulative statistics system. Therefore, it may still be useful to run ANALYZE after restoring from a dump file to ensure optimal performance; see and for more information. 
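Combining the pg_dump options documented above, a hypothetical plain-text invocation that includes optimizer statistics and uses a fixed restrict key for repeatable output might look like this (the option spellings, key, and database name are illustrative assumptions):

    pg_dump --statistics --restrict-key=Alpha123 mydb > mydb.sql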
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml index 8ca68da5a5560..8834b7ec141ea 100644 --- a/doc/src/sgml/ref/pg_dumpall.sgml +++ b/doc/src/sgml/ref/pg_dumpall.sgml @@ -16,10 +16,7 @@ PostgreSQL documentation pg_dumpall - - - export a PostgreSQL database cluster as an SQL script or to other formats - + extract a PostgreSQL database cluster into a script file @@ -36,7 +33,7 @@ PostgreSQL documentation pg_dumpall is a utility for writing out (dumping) all PostgreSQL databases - of a cluster into an SQL script file or an archive. The output contains + of a cluster into one script file. The script file contains SQL commands that can be used as input to to restore the databases. It does this by calling for each database in the cluster. @@ -55,16 +52,11 @@ PostgreSQL documentation - Plain text SQL scripts will be written to the standard output. Use the + The SQL script will be written to the standard output. Use the / option or shell operators to redirect it into a file. - - Archives in other formats will be placed in a directory named using the - /, which is required in this case. - - pg_dumpall needs to connect several times to the PostgreSQL server (once per @@ -74,6 +66,16 @@ PostgreSQL documentation linkend="libpq-pgpass"/> for more information. + + + Restoring a dump causes the destination to execute arbitrary code of the + source superusers' choice. Partial dumps and partial restores do not limit + that. If the source superusers are not trusted, the dumped SQL statements + must be inspected before restoring. Note that the client running the dump + and restore need not trust the source or destination superusers. + + + @@ -129,85 +131,10 @@ PostgreSQL documentation Send output to the specified file. If this is omitted, the standard output is used. - Note: This option can only be omitted when is plain - - - - - - Specify the format of dump files. In plain format, all the dump data is - sent in a single text stream. This is the default. - - In all other modes, pg_dumpall first creates two files: - global.dat and map.dat, in the directory - specified by . - The first file contains global data, such as roles and tablespaces. The second - contains a mapping between database oids and names. These files are used by - pg_restore. Data for individual databases is placed in - databases subdirectory, named using the database's oid. - - - - d - directory - - - Output directory-format archives for each database, - suitable for input into pg_restore. The directory - will have database oid as its name. - - - - - - p - plain - - - Output a plain-text SQL script file (the default). - - - - - - c - custom - - - Output a custom-format archive for each database, - suitable for input into pg_restore. The archive - will be named dboid.dmp where dboid is the - oid of the database. - - - - - - t - tar - - - Output a tar-format archive for each database, - suitable for input into pg_restore. The archive - will be named dboid.tar where dboid is the - oid of the database. - - - - - - - Note: see for details - of how the various non plain text archives work. - - - - - @@ -674,6 +601,26 @@ exclude database PATTERN + + + + + Use the provided string as the psql + \restrict key in the dump output. If no restrict + key is specified, pg_dumpall will generate a + random one as needed. Keys may contain only alphanumeric characters. + + + This option is primarily intended for testing purposes and other + scenarios that require repeatable output (e.g., comparing dump files). 
+ It is not recommended for general use, as a malicious server with + advance knowledge of the key may be able to inject arbitrary code that + will be executed on the machine that runs + psql with the dump output. + + + + @@ -688,12 +635,21 @@ exclude database PATTERN + + + + + Dump optimizer statistics. + + + + Dump only the statistics, not the schema (data definitions) or data. - Statistics for tables, materialized views, foreign tables, + Optimizer statistics for tables, materialized views, foreign tables, and indexes are dumped. @@ -723,33 +679,6 @@ exclude database PATTERN - - - - - Dump data. This is the default. - - - - - - - - - Dump schema (data definitions). This is the default. - - - - - - - - - Dump statistics. - - - - @@ -961,11 +890,12 @@ exclude database PATTERN - If is specified, + When is specified, pg_dumpall will include most optimizer statistics in the - resulting dump file. However, some statistics may not be included, such as - those created explicitly with or - custom statistics added by an extension. Therefore, it may be useful to + resulting dump file. This does not include all statistics, such as + those created explicitly with , + custom statistics added by an extension, or statistics collected by the + cumulative statistics system. Therefore, it may still be useful to run ANALYZE on each database after restoring from a dump file to ensure optimal performance. You can also run vacuumdb -a -z to analyze all databases. diff --git a/doc/src/sgml/ref/pg_recvlogical.sgml b/doc/src/sgml/ref/pg_recvlogical.sgml index f68182266a9fa..263ebdeeab4a8 100644 --- a/doc/src/sgml/ref/pg_recvlogical.sgml +++ b/doc/src/sgml/ref/pg_recvlogical.sgml @@ -53,6 +53,16 @@ PostgreSQL documentation (ControlC) or SIGTERM signal. + + + When pg_recvlogical receives + a SIGHUP signal, it closes the current output file + and opens a new one using the filename specified by + the option. This allows us to rotate + the output file by first renaming the current file and then sending + a SIGHUP signal to + pg_recvlogical. + diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index b649bd3a5ae0f..a468a38361a13 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -18,9 +18,8 @@ PostgreSQL documentation pg_restore - restore PostgreSQL databases from archives - created by pg_dump or - pg_dumpall + restore a PostgreSQL database from an + archive file created by pg_dump @@ -39,14 +38,13 @@ PostgreSQL documentation pg_restore is a utility for restoring a - PostgreSQL database or cluster from an archive - created by or - in one of the non-plain-text + PostgreSQL database from an archive + created by in one of the non-plain-text formats. It will issue the commands necessary to reconstruct the - database or cluster to the state it was in at the time it was saved. The - archives also allow pg_restore to + database to the state it was in at the time it was saved. The + archive files also allow pg_restore to be selective about what is restored, or even to reorder the items - prior to being restored. The archive formats are designed to be + prior to being restored. The archive files are designed to be portable across architectures. @@ -54,17 +52,10 @@ PostgreSQL documentation pg_restore can operate in two modes. If a database name is specified, pg_restore connects to that database and restores archive contents directly into - the database. 
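Per the inspection advice above, a non-plain-text archive can be converted back into an SQL script before restoring; a sketch with a hypothetical archive name, writing the script to standard output rather than to a database:

    pg_restore -f - mydb.dump | less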
- When restoring from a dump made by pg_dumpall, - each database will be created and then the restoration will be run in that - database. - - Otherwise, when a database name is not specified, a script containing the SQL - commands necessary to rebuild the database or cluster is created and written + the database. Otherwise, a script containing the SQL + commands necessary to rebuild the database is created and written to a file or standard output. This script output is equivalent to - the plain text output format of pg_dump or - pg_dumpall. - + the plain text output format of pg_dump. Some of the options controlling the output are therefore analogous to pg_dump options. @@ -77,6 +68,18 @@ PostgreSQL documentation pg_restore will not be able to load the data using COPY statements. + + + + Restoring a dump causes the destination to execute arbitrary code of the + source superusers' choice. Partial dumps and partial restores do not limit + that. If the source superusers are not trusted, the dumped SQL statements + must be inspected before restoring. Non-plain-text dumps can be inspected + by using pg_restore's + option. Note that the client running the dump and restore need not trust + the source or destination superusers. + + @@ -149,8 +152,6 @@ PostgreSQL documentation commands that mention this database. Access privileges for the database itself are also restored, unless is specified. - is required when restoring multiple databases - from an archive created by pg_dumpall. @@ -246,19 +247,6 @@ PostgreSQL documentation - - - - - - Restore only global objects (roles and tablespaces), no databases. - - - This option is only relevant when restoring from an archive made using pg_dumpall. - - - - @@ -603,28 +591,6 @@ PostgreSQL documentation - - - - - Do not restore databases whose name matches - pattern. - Multiple patterns can be excluded by writing multiple - switches. The - pattern parameter is - interpreted as a pattern according to the same rules used by - psql's \d - commands (see ), - so multiple databases can also be excluded by writing wildcard - characters in the pattern. When using wildcards, be careful to - quote the pattern if needed to prevent shell wildcard expansion. - - - This option is only relevant when restoring from an archive made using pg_dumpall. - - - - @@ -842,6 +808,28 @@ PostgreSQL documentation + + + + + Use the provided string as the psql + \restrict key in the dump output. This can only be + specified for SQL script output, i.e., when the + option is used. If no restrict key is specified, + pg_restore will generate a random one as + needed. Keys may contain only alphanumeric characters. + + + This option is primarily intended for testing purposes and other + scenarios that require repeatable output (e.g., comparing dump files). + It is not recommended for general use, as a malicious server with + advance knowledge of the key may be able to inject arbitrary code that + will be executed on the machine that runs + psql with the dump output. + + + + @@ -861,6 +849,16 @@ PostgreSQL documentation + + + + + Output commands to restore statistics, if the archive contains them. + This is the default. + + + + @@ -919,36 +917,6 @@ PostgreSQL documentation - - - - - Output commands to restore data, if the archive contains them. - This is the default. - - - - - - - - - Output commands to restore schema (data definitions), if the archive - contains them. This is the default. - - - - - - - - - Output commands to restore statistics, if the archive contains them. 
- This is the default. - - - - diff --git a/doc/src/sgml/ref/pgupgrade.sgml b/doc/src/sgml/ref/pgupgrade.sgml index aeeed297437e6..1445579429fd8 100644 --- a/doc/src/sgml/ref/pgupgrade.sgml +++ b/doc/src/sgml/ref/pgupgrade.sgml @@ -70,6 +70,14 @@ PostgreSQL documentation pg_upgrade supports upgrades from 9.2.X and later to the current major release of PostgreSQL, including snapshot and beta releases. + + + + Upgrading a cluster causes the destination to execute arbitrary code of the + source superusers' choice. Ensure that the source superusers are trusted + before upgrading. + + @@ -825,10 +833,10 @@ psql --username=postgres --file=script.sql postgres Unless the option is specified, pg_upgrade will transfer most optimizer statistics - from the old cluster to the new cluster. However, some statistics may - not be transferred, such as those created explicitly with or custom statistics added by an - extension. + from the old cluster to the new cluster. This does not transfer + all statistics, such as those created explicitly with + , custom statistics added by + an extension, or statistics collected by the cumulative statistics system. diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml index 95f4cac2467e3..bc273f8dce548 100644 --- a/doc/src/sgml/ref/psql-ref.sgml +++ b/doc/src/sgml/ref/psql-ref.sgml @@ -3551,6 +3551,24 @@ SELECT $1 \parse stmt1 + + \restrict restrict_key + + + Enter "restricted" mode with the provided key. In this mode, the only + allowed meta-command is \unrestrict, to exit + restricted mode. The key may contain only alphanumeric characters. + + + This command is primarily intended for use in plain-text dumps + generated by pg_dump, + pg_dumpall, and + pg_restore, but it may be useful elsewhere. + + + + + \s [ filename ] @@ -3802,6 +3820,24 @@ SELECT 1 \bind \sendpipeline + + \unrestrict restrict_key + + + Exit "restricted" mode (i.e., where all other meta-commands are + blocked), provided the specified key matches the one given to + \restrict when restricted mode was entered. + + + This command is primarily intended for use in plain-text dumps + generated by pg_dump, + pg_dumpall, and + pg_restore, but it may be useful elsewhere. + + + + + \unset name diff --git a/doc/src/sgml/ref/vacuumdb.sgml b/doc/src/sgml/ref/vacuumdb.sgml index b0680a61814cc..bfa3cf560968b 100644 --- a/doc/src/sgml/ref/vacuumdb.sgml +++ b/doc/src/sgml/ref/vacuumdb.sgml @@ -282,14 +282,24 @@ PostgreSQL documentation Only analyze relations that are missing statistics for a column, index - expression, or extended statistics object. This option prevents - vacuumdb from deleting existing statistics - so that the query optimizer's choices do not become transiently worse. + expression, or extended statistics object. When used with + , this option prevents + vacuumdb from temporarily replacing existing + statistics with ones generated with lower statistics targets, thus + avoiding transiently worse query optimizer choices. This option can only be used in conjunction with or . + + Note that requires + SELECT privileges on + pg_statistic + and + pg_statistic_ext_data, + which are restricted to superusers by default. + diff --git a/doc/src/sgml/regress.sgml b/doc/src/sgml/regress.sgml index bf4ffb3057636..8838fe7f0225f 100644 --- a/doc/src/sgml/regress.sgml +++ b/doc/src/sgml/regress.sgml @@ -285,75 +285,88 @@ make check-world PG_TEST_EXTRA='kerberos ldap ssl load_balance libpq_encryption' - sepgsql + libpq_encryption - Runs the test suite under contrib/sepgsql. 
This - requires an SELinux environment that is set up in a specific way; see - . + Runs the test src/interfaces/libpq/t/005_negotiate_encryption.pl. + This opens TCP/IP listen sockets. If PG_TEST_EXTRA + also includes kerberos, additional tests that require + an MIT Kerberos installation are enabled. - ssl + load_balance - Runs the test suite under src/test/ssl. This opens TCP/IP listen sockets. + Runs the test src/interfaces/libpq/t/004_load_balance_dns.pl. + This requires editing the system hosts file and + opens TCP/IP listen sockets. - load_balance + oauth - Runs the test src/interfaces/libpq/t/004_load_balance_dns.pl. - This requires editing the system hosts file and - opens TCP/IP listen sockets. + Runs the test suite under src/test/modules/oauth_validator. + This opens TCP/IP listen sockets for a test server running HTTPS. - libpq_encryption + regress_dump_restore - Runs the test src/interfaces/libpq/t/005_negotiate_encryption.pl. - This opens TCP/IP listen sockets. If PG_TEST_EXTRA - also includes kerberos, additional tests that require - an MIT Kerberos installation are enabled. + Runs an additional test suite in + src/bin/pg_upgrade/t/002_pg_upgrade.pl which + cycles the regression database through pg_dump/ + pg_restore. Not enabled by default because it + is resource intensive. - wal_consistency_checking + sepgsql - Uses wal_consistency_checking=all while running - certain tests under src/test/recovery. Not - enabled by default because it is resource intensive. + Runs the test suite under contrib/sepgsql. This + requires an SELinux environment that is set up in a specific way; see + . - xid_wraparound + ssl - Runs the test suite under src/test/modules/xid_wraparound. - Not enabled by default because it is resource intensive. + Runs the test suite under src/test/ssl. This opens TCP/IP listen sockets. - oauth + wal_consistency_checking - Runs the test suite under src/test/modules/oauth_validator. - This opens TCP/IP listen sockets for a test server running HTTPS. + Uses wal_consistency_checking=all while running + certain tests under src/test/recovery. Not + enabled by default because it is resource intensive. + + + + + + xid_wraparound + + + Runs the test suite under src/test/modules/xid_wraparound. + Not enabled by default because it is resource intensive. diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml index 66a6817a2be0f..f35082861e0c3 100644 --- a/doc/src/sgml/release-18.sgml +++ b/doc/src/sgml/release-18.sgml @@ -1,12 +1,1201 @@ + + Release 18.1 + + + Release date: + 2025-11-13 + + + + This release contains a variety of fixes from 18.0. + For information about new features in major release 18, see + . + + + + Migration to Version 18.1 + + + A dump/restore is not required for those running 18.X. + + + + + Changes + + + + + + + Check for CREATE privileges on the schema + in CREATE STATISTICS (Jelte Fennema-Nio) + § + + + + This omission allowed table owners to create statistics in any + schema, potentially leading to unexpected naming conflicts. + + + + The PostgreSQL Project thanks + Jelte Fennema-Nio for reporting this problem. + (CVE-2025-12817) + + + + + + + Avoid integer overflow in allocation-size calculations + within libpq (Jacob Champion) + § + + + + Several places in libpq were not + sufficiently careful about computing the required size of a memory + allocation. Sufficiently large inputs could cause integer overflow, + resulting in an undersized buffer, which would then lead to writing + past the end of the buffer. 
+ + + + The PostgreSQL Project thanks Aleksey + Solovev of Positive Technologies for reporting this problem. + (CVE-2025-12818) + + + + + + + Prevent unrecognized node type errors when a SQL/JSON + function such as JSON_VALUE has + a DEFAULT clause containing + a COLLATE expression (Jian He) + § + § + + + + + + + Avoid incorrect optimization of + variable-free HAVING clauses with grouping sets + (Richard Guo) + § + § + + + + + + + Do not use parallelism in hash right semi joins (Richard Guo) + § + + + + The case does not work reliably due to a race condition in updating + the join's shared hash table. + + + + + + + Avoid possible division-by-zero when creating ordered-append plans + (Richard Guo) + § + + + + This mistake could result in incorrect selection of the cheapest + path, or in an assertion failure in debug builds. + + + + + + + Fix planner failure with index types that can do ordered access but + not index-only scans (Maxime Schoemans) + § + + + + This oversight resulted in errors like no data returned for + index-only scan. The case does not arise with any in-core + index type, but some extensions encountered the problem. + + + + + + + Remove faulty assertion in btree index cleanup (Peter Geoghegan) + § + + + + + + + Avoid possible out-of-memory or invalid memory alloc request + size failures during parallel GIN index build (Tomas Vondra) + § + + + + + + + Ensure that BRIN autosummarization provides a snapshot for index + expressions that need one (Álvaro Herrera) + § + § + + + + Previously, autosummarization would fail for such indexes, and then + leave placeholder index tuples behind, causing the index to bloat + over time. + + + + + + + Fix integer-overflow hazard in BRIN index scans when the table + contains close to 232 pages (Sunil S) + § + + + + This oversight could result in an infinite loop or scanning of + unneeded table pages. + + + + + + + Fix incorrect zero-extension of stored values in JIT-generated tuple + deforming code (David Rowley) + § + + + + When not using JIT, the equivalent code does sign-extension not + zero-extension, leading to a different Datum representation of small + integer data types. This inconsistency was masked in most cases, + but it is known to lead to could not find memoization table + entry errors when using Memoize plan nodes, and there might + be other symptoms. + + + + + + + Fix rare crash when processing hashed GROUPING + SETS queries (David Rowley) + § + + + + + + + Repair faulty hash-table-size-choosing logic in hash joins + (Tomas Vondra) + § + + + + Hash joins sometimes used more memory than intended, or failed to + divide it in an efficient way. + + + + + + + Improve relation lookup logic in statistics manipulation functions + (Nathan Bossart) + § + § + + + + Fix pg_restore_relation_stats(), + pg_clear_relation_stats(), + pg_restore_attribute_stats(), and + pg_clear_attribute_stats() to check + privileges before acquiring lock on the target relation + rather than after. + + + + + + + Fix incorrect logic for caching result-relation information for + triggers (David Rowley, Amit Langote) + § + + + + In cases where partitions' column sets aren't physically identical + to their parent partitioned tables' column sets, this oversight + could lead to crashes. 
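As a minimal sketch of the precondition behind the trigger-caching fix above: a partition's physical column set can diverge from its parent's when the parent carries a dropped-column slot. All table and column names here are hypothetical, and the snippet only sets up the divergent layout rather than reproducing the crash; it assumes a running server reachable by psql.

    psql <<'SQL'
    -- The parent acquires a dropped-column slot; the separately created
    -- partition has none, so the two column sets match logically but are
    -- not physically identical.
    CREATE TABLE parent (a int, junk int, b text) PARTITION BY LIST (a);
    ALTER TABLE parent DROP COLUMN junk;
    CREATE TABLE part1 (a int, b text);
    ALTER TABLE parent ATTACH PARTITION part1 FOR VALUES IN (1);
    SQL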
+ + + + + + + Fix crash during EvalPlanQual rechecks on partitioned tables (David + Rowley, Amit Langote) + § + + + + + + + Fix EvalPlanQual handling of foreign or custom joins that do not + have an alternative local-join plan prepared for EPQ (Masahiko + Sawada, Etsuro Fujita) + § + + + + In such cases the foreign or custom access method should be invoked + normally, but that did not happen, typically leading to a crash. + + + + + + + Avoid duplicating hash partition constraints during DETACH + CONCURRENTLY (Haiyang Li) + § + + + + ALTER TABLE DETACH PARTITION CONCURRENTLY was + written to add a copy of the partitioning constraint to the + now-detached partition. This was misguided, partially because + non-concurrent DETACH doesn't do that, but mostly + because in the case of hash partitioning the constraint expression + contains references to the parent table's OID. That causes problems + during dump/restore, or if the parent table is dropped + after DETACH. In v19 and later, we'll no longer + create any such copied constraints at all. In released branches, to + minimize the risk of unforeseen consequences, only skip adding a + copied constraint if it is for hash partitioning. + + + + + + + Disallow generated columns in partition keys + (Jian He, Ashutosh Bapat) + § + + + + This was already not allowed, but the check missed some cases, such + as where the column reference is implicit in a whole-row reference. + + + + + + + Disallow generated columns in COPY ... FROM + ... WHERE clauses (Peter Eisentraut, Jian He) + § + + + + Previously, incorrect behavior or an obscure error message resulted + from attempting to reference such a column, since generated columns + have not yet been computed at the point + where WHERE filtering is done. + + + + + + + Prevent setting a column as identity if it has a not-null constraint + but the constraint is marked as invalid (Jian He) + § + + + + Identity columns must be not-null, but the check for that missed + this edge case. + + + + + + + Avoid potential use-after-free in parallel vacuum (Kevin Oommen + Anish) + § + + + + This bug seems to have no consequences in standard builds, but it's + theoretically a hazard. + + + + + + + Fix visibility checking for statistics objects + in pg_temp (Noah Misch) + § + + + + A statistics object located in a temporary schema cannot be named + without schema qualification, + but pg_statistics_obj_is_visible() missed that + memo and could return true regardless. In turn, + functions such as pg_describe_object() could + fail to schema-qualify the object's name as expected. + + + + + + + Fix minor memory leak during WAL replay of database creation + (Nathan Bossart) + § + + + + + + + Fix incorrect reporting of replication lag + in pg_stat_replication view (Fujii Masao) + § + + + + If any standby server's replay LSN stopped advancing, + the write_lag + and flush_lag columns would eventually + stop updating. + + + + + + + Avoid duplicative log messages about + invalid primary_slot_name settings (Fujii Masao) + § + + + + + + + Avoid failures when synchronized_standby_slots + references nonexistent replication slots (Shlok Kyal) + § + + + + + + + Remove the unfinished slot state file after failing to write a + replication slot's state to disk (Michael Paquier) + § + + + + Previously, a failure such as out-of-disk-space resulted in leaving + a temporary state.tmp file behind. That's + problematic because it would block all subsequent attempts to + write the state, requiring manual intervention to clean up. 
+ + + + + + + Fix mishandling of lock timeout signals in parallel apply workers + for logical replication (Hayato Kuroda) + § + + + + The same signal number was being used for both worker shutdown and + lock timeout, leading to confusion. + + + + + + + Avoid unwanted WAL receiver shutdown when switching from streaming + to archive WAL source (Xuneng Zhou) + § + + + + During a timeline change, a standby server's WAL receiver should + remain alive, waiting for a new WAL streaming start point. Instead + it was repeatedly shutting down and immediately getting restarted, + which could confuse status monitoring code. + + + + + + + Fix use-after-free issue in the relation synchronization cache + maintained by the pgoutput logical + decoding plugin (Vignesh C, Masahiko Sawada) + § + + + + An error during logical decoding could result in crashes in + subsequent logical decoding attempts in the same session. + The case is only reachable when pgoutput + is invoked via SQL functions. + + + + + + + Avoid unnecessary invalidation of logical replication slots + (Bertrand Drouvot) + § + + + + + + + Re-establish special case for C collation in + locale setup (Jeff Davis) + § + + + + This fixes a regression in access to shared catalogs early in + backend startup, before a database has been selected. It is not + known to be a problem for any + core PostgreSQL code, but some extensions + were broken. + + + + + + + Fix incorrect printing of messages about failures in checking + whether the user has Windows administrator privilege (Bryan Green) + § + + + + This code would have crashed or at least printed garbage. + No such cases have been reported though, indicating that failure of + these system calls is extremely rare. + + + + + + + Avoid crash when attempting to + test PostgreSQL with certain libsanitizer + options (Emmanuel Sibi, Jacob Champion) + § + + + + + + + Fix false memory-context-checking warnings in debug builds + on 64-bit Windows (David Rowley) + § + + + + + + + Correctly handle GROUP BY DISTINCT in PL/pgSQL + assignment statements (Tom Lane) + § + + + + The parser failed to record the DISTINCT option + in this context, so that the command would act as if it were + plain GROUP BY. + + + + + + + Avoid leaking memory when handling a SQL error within PL/Python + (Tom Lane) + § + + + + This fixes a session-lifespan memory leak introduced in our previous + minor releases. + + + + + + + Fix libpq's handling of socket-related + errors on Windows within its GSSAPI logic (Ning Wu, Tom Lane) + § + + + + The code for encrypting/decrypting transmitted data using GSSAPI did + not correctly recognize error conditions on the connection socket, + since Windows reports those differently than other platforms. This + led to failure to make such connections on Windows. + + + + + + + Fix dumping of non-inherited not-null constraints on inherited table + columns (Dilip Kumar) + § + + + + pg_dump failed to preserve such + constraints when dumping from a pre-v18 server. + + + + + + + Fix pg_dump's sorting of + foreign key constraints (Álvaro Herrera) + § + + + + Ensure consistent ordering of these database objects, as was + already done for other object types. + + + + + + + Fix assorted errors in the data compression logic + in pg_dump + and pg_restore + (Daniel Gustafsson, Tom Lane) + § + § + § + + + + Error checking was missing or incorrect in several places, and there + were also portability issues that would manifest on big-endian + hardware. 
These problems had been missed because this code is only + used to read compressed TOC files within directory-format + dumps. pg_dump never produces such a + dump; the case can be reached only by manually compressing the TOC + file after the fact, which is a supported thing to do but very + uncommon. + + + + + + + Fix pgbench to error out cleanly if + a COPY operation is started (Anthonin Bonnefoy) + § + + + + pgbench doesn't intend to support this + case, but previously it went into an infinite loop. + + + + + + + Fix pgbench's reporting of multiple + errors (Yugo Nagata) + § + + + + In cases where two successive PQgetResult calls + both fail, pgbench might report the wrong + error message. + + + + + + + In pgbench, fix faulty assertion about + errors in pipeline mode (Yugo Nagata) + § + + + + + + + Fix per-file memory leakage + in pg_combinebackup (Tom Lane) + § + + + + + + + Ensure that contrib/pg_buffercache functions + can be canceled (Satyanarayana Narlapuram, Yuhang Qiu) + § + § + + + + Some code paths were capable of running for a long time without + checking for interrupts. + + + + + + + Fix contrib/pg_prewarm's privilege checks for + indexes (Ayush Vatsa, Nathan Bossart) + § + § + + + + pg_prewarm() requires SELECT + privilege on relations to be prewarmed. However, since indexes have + no SQL privileges of their own, this resulted in non-superusers + being unable to prewarm indexes. Instead, check + for SELECT privilege on the index's table. + + + + + + + In contrib/pg_stat_statements, avoid + crash when two or more constants are marked as having the same + location in the SQL statement text (Sami Imseih, Dmitry Dolgov) + § + + + + + + + Make contrib/pgstattuple more robust about + empty or invalid index pages (Nitin Motiani) + § + + + + Count all-zero pages as free space, and ignore pages that are + invalid according to a check of the page's special-space size. + The code for btree indexes already counted all-zero pages as free, + but the hash and gist code would error out, which has been found to + be much less user-friendly. Similarly, make all three cases agree + on ignoring corrupted pages rather than throwing errors. + + + + + + + Harden our read and write barrier macros to satisfy Clang + (Thomas Munro) + § + + + + We supposed that __atomic_thread_fence() is a + sufficient barrier to prevent the C compiler from re-ordering memory + accesses around it, but it appears that that's not true for Clang, + allowing it to generate incorrect code for at least RISC-V, MIPS, + and LoongArch machines. Add explicit compiler barriers to fix that. + + + + + + + Fix PGXS build infrastructure to support building + NLS po files for extensions (Ryo Matsumura) + § + + + + + + + + Release 18 Release date: - 2025-??-??, CURRENT AS OF 2025-06-20 + 2025-09-25 @@ -21,7 +1210,65 @@ - (to be completed) + An asynchronous I/O (AIO) subsystem that can improve performance of + sequential scans, bitmap heap scans, vacuums, and other operations. + + + + + + pg_upgrade + now retains optimizer statistics. + + + + + + Support for "skip scan" lookups that allow using + multicolumn B-tree indexes in + more cases. + + + + + + uuidv7() + function for generating timestamp-ordered + UUIDs. + + + + + + Virtual + generated columns + that compute their values during read operations. This is now the + default for generated columns. + + + + + + OAuth authentication support. + + + + + + OLD and NEW support for + RETURNING clauses + in , , + , and commands. 
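The RETURNING item above is easiest to see with a short example; the table and values are hypothetical.

    psql <<'SQL'
    -- PostgreSQL 18 lets RETURNING reference both the old and new row
    -- values (INSERT, UPDATE, DELETE, and MERGE all support this).
    CREATE TABLE prices (item text PRIMARY KEY, price numeric);
    INSERT INTO prices VALUES ('widget', 10.00);
    UPDATE prices
       SET price = price * 1.10
     WHERE item = 'widget'
    RETURNING old.price AS old_price, new.price AS new_price;
    SQL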
+ + + + + + Temporal constraints, or constraints over ranges, for + PRIMARY KEY, + UNIQUE, and + FOREIGN KEY + constraints. @@ -266,6 +1513,30 @@ Author: Fujii Masao + + + + + Change full text search to use the + default collation provider of the cluster to read configuration files + and dictionaries, rather than always using libc (Peter Eisentraut) + § + + + + Clusters that default to non-libc collation providers (e.g., ICU, + builtin) that behave differently than libc for characters processed + by LC_CTYPE could observe changes in behavior of some full-text + search functions, as well as the extension. + When upgrading such clusters using , it + is recommended to reindex all indexes related to full-text search + and pg_trgm after the upgrade. + + + @@ -583,8 +1854,10 @@ Author: Peter Geoghegan - This allows multi-column btree indexes to be used by queries that - only equality-reference the second or later indexed columns. + This allows multi-column btree indexes to be used in more cases such + as when there are no restrictions on the first or early indexed + columns (or there are non-equality ones), and there are useful + restrictions on later indexed columns. @@ -1181,15 +2454,18 @@ Author: Álvaro Herrera 2025-03-18 [62d712ecf] Introduce squashing of constant lists in query jumbling Author: Álvaro Herrera 2025-03-27 [9fbd53dea] Remove the query_id_squash_values GUC +Author: Álvaro Herrera +Branch: master Release: REL_18_BR [c2da1a5d6] 2025-06-24 19:36:32 +0200 --> Have query id computation - of arrays consider only the first and last array elements (Dmitry + of constant lists consider only the first and last constants (Dmitry Dolgov, Sami Imseih) § § + § @@ -1521,7 +2797,7 @@ Author: Amit Kapila - Allow inactive replication slots to be automatically invalided using + Allow inactive replication slots to be automatically invalidated using server variable (Nisha Moond, Bharath Rupireddy) § @@ -1864,21 +3140,27 @@ Author: Thomas Munro Allow the specification of non-overlapping PRIMARY - KEY and UNIQUE + KEY, + UNIQUE, and + foreign key constraints (Paul A. Jungwirth) § + § - This is specified by WITHOUT OVERLAPS on the - last specified column. + This is specified by WITHOUT OVERLAPS for + PRIMARY KEY and UNIQUE, and by + PERIOD for foreign keys, all applied to the last + specified column. @@ -1930,6 +3212,8 @@ Author: Peter Eisentraut @@ -1940,6 +3224,7 @@ Author: Álvaro Herrera linkend="catalog-pg-constraint">pg_constraint (Álvaro Herrera, Bernd Helmle) § + § @@ -2626,20 +3911,6 @@ Author: Heikki Linnakangas - - - - - Add libpq function PQservice() - to return the connection service name (Michael Banck) - § - - - @@ -2738,6 +4011,7 @@ Author: Michael Paquier Allow psql to parse, bind, and close named prepared statements (Anthonin Bonnefoy, Michael Paquier) § + § @@ -2972,8 +4246,9 @@ Author: Nathan Bossart - This option can only be used by - and . + This option can only be run by superusers and can only + be used with options and + . @@ -3048,37 +4323,19 @@ Author: Masahiko Sawada - - - - - Allow to dump in the same output - formats as pg_dump supports (Mahendra - Singh Thalor, Andrew Dunstan) - § - - - - Also modify to handle such dumps. - Previously pg_dumpall only supported - text format. 
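Tying back to the vacuumdb entry above (the superuser-only option that can only be combined with the analyze options), a hedged sketch of the intended post-upgrade workflow; the long option names --missing-stats-only and --analyze-in-stages are editorial assumptions, since the text above elides them.

    # Analyze only relations that lack statistics, in stages; assumes the
    # vacuumdb option names noted above and superuser privileges.
    vacuumdb --all --analyze-in-stages --missing-stats-only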
- - - - Add options - , , - and (Jeff Davis) + Add option + (Jeff Davis) § + § @@ -3285,13 +4542,16 @@ Author: Amit Kapila Add pg_createsubscriber option - to remove publications (Shubham Khanna) + to remove publications (Shubham Khanna) § + § @@ -3317,9 +4577,14 @@ Author: Masahiko Sawada Add option - to specify failover slots (Hayato Kuroda) + to specify failover slots (Hayato Kuroda) § + + + Also add option as a synonym + for , and deprecate the latter. +
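To round out the pg_createsubscriber entries above, a hedged usage sketch; every path and connection string is hypothetical, and only long-established options (--dry-run, --pgdata, --publisher-server, --database) are shown, since the newer option names are elided in the text above.

    # Validate, without changing anything, converting a stopped physical
    # standby into a logical subscriber; drop --dry-run to perform it.
    pg_createsubscriber --dry-run \
      --pgdata /var/lib/postgresql/18/standby \
      --publisher-server 'host=primary.example.com port=5432' \
      --database app_db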