diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt
index b1f8d285750..2b63abacfea 100644
--- a/.devcontainer/requirements.txt
+++ b/.devcontainer/requirements.txt
@@ -1,9 +1,9 @@
-black==24.3.0
+black==26.3.1
 click==8.1.7
 isort==5.12.0
 mypy-extensions==1.0.0
 packaging==23.2
-pathspec==0.11.2
+pathspec==1.0.0
 platformdirs==4.0.0
 tomli==2.0.1
 typing_extensions==4.8.0
diff --git a/.devcontainer/src/test/regress/Pipfile b/.devcontainer/src/test/regress/Pipfile
index 8ade5491338..fbb172573da 100644
--- a/.devcontainer/src/test/regress/Pipfile
+++ b/.devcontainer/src/test/regress/Pipfile
@@ -4,7 +4,7 @@ url = "https://pypi.python.org/simple"
 verify_ssl = true
 
 [packages]
-mitmproxy = {git = "https://github.com/citusdata/mitmproxy.git", ref = "main"}
+mitmproxy = {git = "https://github.com/citusdata/mitmproxy.git", ref = "70bad9a3c098f605e5f8b25553e5db5334018ff1"}
 "aioquic" = ">=1.2.0,<1.3.0"
 "mitmproxy-rs" = ">=0.12.6,<0.13.0"
 argon2-cffi = ">=23.1.0"
@@ -12,12 +12,12 @@ bcrypt = ">=4.1.2"
 brotli = "<=1.2.0"
 h11 = "==0.16.0"
 h2 = "==4.3.0"
-tornado = ">=6.5.1,<6.6.0"
+tornado = ">=6.5.5,<6.6.0"
 zstandard = ">=0.25.0"
 construct = "*"
 docopt = "==0.6.2"
-cryptography = "==44.0.3"
-pytest = "*"
+cryptography = "==46.0.7"
+pytest = "==9.0.3"
 psycopg = "*"
 filelock = "*"
 pytest-asyncio = "*"
@@ -25,12 +25,12 @@ pytest-timeout = "*"
 pytest-xdist = "*"
 pytest-repeat = "*"
 pyyaml = "*"
-werkzeug = "==3.1.4"
+werkzeug = "==3.1.6"
 "typing-extensions" = ">=4.13.2,<5"
 pyperclip = "==1.9.0"
 
 [dev-packages]
-black = "==24.10.0"
+black = "==26.3.1"
 isort = "*"
 flake8 = "*"
 flake8-bugbear = "*"
diff --git a/.devcontainer/src/test/regress/Pipfile.lock b/.devcontainer/src/test/regress/Pipfile.lock
index 5b662d4343f..ca68abfdbd9 100644
--- a/.devcontainer/src/test/regress/Pipfile.lock
+++ b/.devcontainer/src/test/regress/Pipfile.lock
@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "5f734fff88a49010712613a14addae5f38347c691d0b3004b09f59ba5c7cc061"
+            "sha256": "912f8e179377d7d34b800be62a5b10c0fa6ecdebc2413d7002f8340679b4c6be"
         },
         "pipfile-spec": 6,
         "requires": {
@@ -92,19 +92,19 @@
         },
         "asgiref": {
            "hashes": [
-                "sha256:aef8a81283a34d0ab31630c9b7dfe70c812c95eba78171367ca8745e88124734",
-                "sha256:d89f2d8cd8b56dada7d52fa7dc8075baa08fb836560710d38c292a7a3f78c04e"
+                "sha256:13acff32519542a1736223fb79a715acdebe24286d98e8b164a73085f40da2c4",
+                "sha256:1db9021efadb0d9512ce8ffaf72fcef601c7b73a8807a1bb2ef143dc6b14846d"
             ],
             "markers": "python_version >= '3.9'",
-            "version": "==3.10.0"
+            "version": "==3.11.0"
         },
         "attrs": {
             "hashes": [
-                "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11",
-                "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373"
+                "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309",
+                "sha256:d03ceb89cb322a8fd706d4fb91940737b6642aa36998fe130a9bc96c985eff32"
             ],
             "markers": "python_version >= '3.9'",
-            "version": "==25.4.0"
+            "version": "==26.1.0"
         },
         "bcrypt": {
             "hashes": [
@@ -292,11 +292,11 @@
         },
         "certifi": {
             "hashes": [
-                "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c",
-                "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"
+                "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa",
+                "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7"
             ],
             "markers": "python_version >= '3.7'",
-            "version": "==2026.1.4"
+            "version": "==2026.2.25"
         },
         "cffi": {
             "hashes": [
@@ -390,11 +390,11 @@
         },
         "click": {
             "hashes": [
"sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", - "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6" + "sha256:14162b8b3b3550a7d479eafa77dfd3c38d9dc8951f6f69c78913a8f9a7540fd5", + "sha256:1924d2c27c5653561cd2cae4548d1406039cb79b858b747cfea24924bbc1616d" ], "markers": "python_version >= '3.10'", - "version": "==8.3.1" + "version": "==8.3.2" }, "construct": { "hashes": [ @@ -407,47 +407,59 @@ }, "cryptography": { "hashes": [ - "sha256:02f55fb4f8b79c1221b0961488eaae21015b69b210e18c386b69de182ebb1259", - "sha256:157f1f3b8d941c2bd8f3ffee0af9b049c9665c39d3da9db2dc338feca5e98a43", - "sha256:192ed30fac1728f7587c6f4613c29c584abdc565d7417c13904708db10206645", - "sha256:21a83f6f35b9cc656d71b5de8d519f566df01e660ac2578805ab245ffd8523f8", - "sha256:25cd194c39fa5a0aa4169125ee27d1172097857b27109a45fadc59653ec06f44", - "sha256:3883076d5c4cc56dbef0b898a74eb6992fdac29a7b9013870b34efe4ddb39a0d", - "sha256:3bb0847e6363c037df8f6ede57d88eaf3410ca2267fb12275370a76f85786a6f", - "sha256:3be3f649d91cb182c3a6bd336de8b61a0a71965bd13d1a04a0e15b39c3d5809d", - "sha256:3f07943aa4d7dad689e3bb1638ddc4944cc5e0921e3c227486daae0e31a05e54", - "sha256:479d92908277bed6e1a1c69b277734a7771c2b78633c224445b5c60a9f4bc1d9", - "sha256:4ffc61e8f3bf5b60346d89cd3d37231019c17a081208dfbbd6e1605ba03fa137", - "sha256:5639c2b16764c6f76eedf722dbad9a0914960d3489c0cc38694ddf9464f1bb2f", - "sha256:58968d331425a6f9eedcee087f77fd3c927c88f55368f43ff7e0a19891f2642c", - "sha256:5d186f32e52e66994dce4f766884bcb9c68b8da62d61d9d215bfe5fb56d21334", - "sha256:5d20cc348cca3a8aa7312f42ab953a56e15323800ca3ab0706b8cd452a3a056c", - "sha256:6866df152b581f9429020320e5eb9794c8780e90f7ccb021940d7f50ee00ae0b", - "sha256:7d5fe7195c27c32a64955740b949070f21cba664604291c298518d2e255931d2", - "sha256:896530bc9107b226f265effa7ef3f21270f18a2026bc09fed1ebd7b66ddf6375", - "sha256:962bc30480a08d133e631e8dfd4783ab71cc9e33d5d7c1e192f0b7c06397bb88", - "sha256:978631ec51a6bbc0b7e58f23b68a8ce9e5f09721940933e9c217068388789fe5", - "sha256:9b4d4a5dbee05a2c390bf212e78b99434efec37b17a4bff42f50285c5c8c9647", - "sha256:ab0b005721cc0039e885ac3503825661bd9810b15d4f374e473f8c89b7d5460c", - "sha256:af653022a0c25ef2e3ffb2c673a50e5a0d02fecc41608f4954176f1933b12359", - "sha256:b0cc66c74c797e1db750aaa842ad5b8b78e14805a9b5d1348dc603612d3e3ff5", - "sha256:b424563394c369a804ecbee9b06dfb34997f19d00b3518e39f83a5642618397d", - "sha256:c138abae3a12a94c75c10499f1cbae81294a6f983b3af066390adee73f433028", - "sha256:c6cd67722619e4d55fdb42ead64ed8843d64638e9c07f4011163e46bc512cf01", - "sha256:c91fc8e8fd78af553f98bc7f2a1d8db977334e4eea302a4bfd75b9461c2d8904", - "sha256:cad399780053fb383dc067475135e41c9fe7d901a97dd5d9c5dfb5611afc0d7d", - "sha256:cb90f60e03d563ca2445099edf605c16ed1d5b15182d21831f58460c48bffb93", - "sha256:dad80b45c22e05b259e33ddd458e9e2ba099c86ccf4e88db7bbab4b747b18d06", - "sha256:dd3db61b8fe5be220eee484a17233287d0be6932d056cf5738225b9c05ef4fff", - "sha256:e28d62e59a4dbd1d22e747f57d4f00c459af22181f0b2f787ea83f5a876d7c76", - "sha256:e909df4053064a97f1e6565153ff8bb389af12c5c8d29c343308760890560aff", - "sha256:f3ffef566ac88f75967d7abd852ed5f182da252d23fac11b4766da3957766759", - "sha256:fc3c9babc1e1faefd62704bb46a69f359a9819eb0292e40df3fb6e3574715cd4", - "sha256:fe19d8bc5536a91a24a8133328880a41831b6c5df54599a8417b62fe015d3053" + "sha256:04959522f938493042d595a736e7dbdff6eb6cc2339c11465b3ff89343b65f65", + "sha256:128c5edfe5e5938b86b03941e94fac9ee793a94452ad1365c9fc3f4f62216832", + 
"sha256:1d25aee46d0c6f1a501adcddb2d2fee4b979381346a78558ed13e50aa8a59067", + "sha256:24402210aa54baae71d99441d15bb5a1919c195398a87b563df84468160a65de", + "sha256:258514877e15963bd43b558917bc9f54cf7cf866c38aa576ebf47a77ddbc43a4", + "sha256:35719dc79d4730d30f1c2b6474bd6acda36ae2dfae1e3c16f2051f215df33ce0", + "sha256:397655da831414d165029da9bc483bed2fe0e75dde6a1523ec2fe63f3c46046b", + "sha256:3986ac1dee6def53797289999eabe84798ad7817f3e97779b5061a95b0ee4968", + "sha256:420b1e4109cc95f0e5700eed79908cef9268265c773d3a66f7af1eef53d409ef", + "sha256:42a1e5f98abb6391717978baf9f90dc28a743b7d9be7f0751a6f56a75d14065b", + "sha256:462ad5cb1c148a22b2e3bcc5ad52504dff325d17daf5df8d88c17dda1f75f2a4", + "sha256:506c4ff91eff4f82bdac7633318a526b1d1309fc07ca76a3ad182cb5b686d6d3", + "sha256:5ad9ef796328c5e3c4ceed237a183f5d41d21150f972455a9d926593a1dcb308", + "sha256:5d1c02a14ceb9148cc7816249f64f623fbfee39e8c03b3650d842ad3f34d637e", + "sha256:5e51be372b26ef4ba3de3c167cd3d1022934bc838ae9eaad7e644986d2a3d163", + "sha256:60627cf07e0d9274338521205899337c5d18249db56865f943cbe753aa96f40f", + "sha256:65814c60f8cc400c63131584e3e1fad01235edba2614b61fbfbfa954082db0ee", + "sha256:73510b83623e080a2c35c62c15298096e2a5dc8d51c3b4e1740211839d0dea77", + "sha256:7bbc6ccf49d05ac8f7d7b5e2e2c33830d4fe2061def88210a126d130d7f71a85", + "sha256:80406c3065e2c55d7f49a9550fe0c49b3f12e5bfff5dedb727e319e1afb9bf99", + "sha256:84d4cced91f0f159a7ddacad249cc077e63195c36aac40b4150e7a57e84fffe7", + "sha256:8a469028a86f12eb7d2fe97162d0634026d92a21f3ae0ac87ed1c4a447886c83", + "sha256:91bbcb08347344f810cbe49065914fe048949648f6bd5c2519f34619142bbe85", + "sha256:935ce7e3cfdb53e3536119a542b839bb94ec1ad081013e9ab9b7cfd478b05006", + "sha256:9694078c5d44c157ef3162e3bf3946510b857df5a3955458381d1c7cfc143ddb", + "sha256:a1529d614f44b863a7b480c6d000fe93b59acee9c82ffa027cfadc77521a9f5e", + "sha256:abad9dac36cbf55de6eb49badd4016806b3165d396f64925bf2999bcb67837ba", + "sha256:b36a4695e29fe69215d75960b22577197aca3f7a25b9cf9d165dcfe9d80bc325", + "sha256:b7b412817be92117ec5ed95f880defe9cf18a832e8cafacf0a22337dc1981b4d", + "sha256:c5b1ccd1239f48b7151a65bc6dd54bcfcc15e028c8ac126d3fada09db0e07ef1", + "sha256:cbd5fb06b62bd0721e1170273d3f4d5a277044c47ca27ee257025146c34cbdd1", + "sha256:cdf1a610ef82abb396451862739e3fc93b071c844399e15b90726ef7470eeaf2", + "sha256:cdfbe22376065ffcf8be74dc9a909f032df19bc58a699456a21712d6e5eabfd0", + "sha256:d02c738dacda7dc2a74d1b2b3177042009d5cab7c7079db74afc19e56ca1b455", + "sha256:d151173275e1728cf7839aaa80c34fe550c04ddb27b34f48c232193df8db5842", + "sha256:d23c8ca48e44ee015cd0a54aeccdf9f09004eba9fc96f38c911011d9ff1bd457", + "sha256:d3b99c535a9de0adced13d159c5a9cf65c325601aa30f4be08afd680643e9c15", + "sha256:d5f7520159cd9c2154eb61eb67548ca05c5774d39e9c2c4339fd793fe7d097b2", + "sha256:db0f493b9181c7820c8134437eb8b0b4792085d37dbb24da050476ccb664e59c", + "sha256:e06acf3c99be55aa3b516397fe42f5855597f430add9c17fa46bf2e0fb34c9bb", + "sha256:e4cfd68c5f3e0bfdad0d38e023239b96a2fe84146481852dffbcca442c245aa5", + "sha256:ea42cbe97209df307fdc3b155f1b6fa2577c0defa8f1f7d3be7d31d189108ad4", + "sha256:ebd6daf519b9f189f85c479427bbd6e9c9037862cf8fe89ee35503bd209ed902", + "sha256:f247c8c1a1fb45e12586afbb436ef21ff1e80670b2861a90353d9b025583d246", + "sha256:fbfd0e5f273877695cb93baf14b185f4878128b250cc9f8e617ea0c025dfb022", + "sha256:fc9ab8856ae6cf7c9358430e49b368f3108f050031442eaeb6b9d87e4dcf4e4f", + "sha256:fcd8eac50d9138c1d7fc53a653ba60a2bee81a505f9f8850b6b2888555a45d0e", + "sha256:fdd1736fed309b4300346f88f74cd120c27c56852c3838cab416e7a166f67298", + 
"sha256:ffca7aa1d00cf7d6469b988c581598f2259e46215e0140af408966a24cf086ce" ], "index": "pypi", - "markers": "python_version >= '3.7' and python_full_version not in '3.9.0, 3.9.1'", - "version": "==44.0.3" + "markers": "python_version >= '3.8' and python_full_version not in '3.9.0, 3.9.1'", + "version": "==46.0.7" }, "docopt": { "hashes": [ @@ -466,20 +478,20 @@ }, "filelock": { "hashes": [ - "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64", - "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8" + "sha256:4ed1010aae813c4ee8d9c660e4792475ee60c4a0ba76073ceaf862bd317e3ca6", + "sha256:de9af6712788e7171df1b28b15eba2446c69721433fa427a9bee07b17820a9db" ], "index": "pypi", "markers": "python_version >= '3.10'", - "version": "==3.20.2" + "version": "==3.28.0" }, "flask": { "hashes": [ - "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87", - "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c" + "sha256:0ef0e52b8a9cd932855379197dd8f94047b359ca0a78695144304cb45f87c9eb", + "sha256:f4bcbefc124291925f1a26446da31a5178f9483862233b23c0c96a20701f670c" ], "markers": "python_version >= '3.9'", - "version": "==3.1.2" + "version": "==3.1.3" }, "h11": { "hashes": [ @@ -655,33 +667,33 @@ "mitmproxy": { "git": "https://github.com/citusdata/mitmproxy.git", "hashes": [ - "sha256:95db7b57b21320a0c76e59e1d6644daaa431291cdf89419608301424651199b4" + "sha256:7b111ba3b83b34b0d9b653044685db7c3f5fbc63b39b8f06439642da83910713" ], "index": "pypi", "markers": "python_version >= '3.12'", - "ref": "30b588eb1b7c66da233d7c471ec23f2b632ed531", - "version": "==11.0.2" + "ref": "70bad9a3c098f605e5f8b25553e5db5334018ff1", + "version": "==12.2.2" }, "mitmproxy-linux": { "hashes": [ - "sha256:0bea9353c71ebfd2174f6730b3fd0fdff3adea1aa15450035bed3b83e36ef455", - "sha256:2238455e65970382825baed2e998601ea82d8dcaae51bd8ee0859d596524a822", - "sha256:fbcb25316e95d0b2b5ced4e0cc3d90fdb1b7169300a005cc79339894d665363a" + "sha256:94b10fee02aa42287739623cef921e1a53955005d45c9e2fa309ae9f0bf8d37d", + "sha256:b4413e27c692f30036ad6d73432826e728ede026fac8e51651d0c545dd0177f2", + "sha256:ee842865a05f69196004ddcb29d50af0602361d9d6acee04f370f7e01c3674e8" ], "markers": "python_version >= '3.12'", - "version": "==0.12.8" + "version": "==0.12.9" }, "mitmproxy-rs": { "hashes": [ - "sha256:14ea236d0950ab35d667b78b5fe15d43e7345e166e22144624a1283edc78443e", - "sha256:16afd0fc1a00d586ffe2027d217908c3e0389d7d0897eccda6e59fda991e89ba", - "sha256:739591f696cf29913302a72fa9644cf97228774604304a2ea3987fe5588d231c", - "sha256:b0ead519f5a4ab019e7912544c0642f28f8336036ef1480e42a772a8cc947550", - "sha256:c5b0799808a4de0ee60e8f350043820ad56eea738ce3ce25d5c6faaa245b6c9a" + "sha256:1fb9fb4aac9ecb82e2c3c5c439ef5e4961be7934d80ade5e9a99c0a944b8ea2f", + "sha256:1fd716e87da8be3c62daa4325a5ff42bedd951fb8614c5f66caa94b7c21e2593", + "sha256:245922663440330c4b5a36d0194ed559b1dbd5e38545db2eb947180ed12a5e92", + "sha256:afeb3a2da2bc26474e1a2febaea4432430c5fde890dfce33bc4c1e65e6baef1b", + "sha256:c6ffc35c002c675cac534442d92d1cdebd66fafd63754ad33b92ae968ea6e449" ], "index": "pypi", "markers": "python_version >= '3.12'", - "version": "==0.12.8" + "version": "==0.12.9" }, "msgpack": { "hashes": [ @@ -753,11 +765,11 @@ }, "packaging": { "hashes": [ - "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", - "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f" + "sha256:5d9c0669c6285e491e0ced2eee587eaf67b670d94a19e94e3984a481aba6802f", + 
"sha256:f042152b681c4bfac5cae2742a55e103d27ab2ec0f3d88037136b6bfe7c9c5de" ], "markers": "python_version >= '3.8'", - "version": "==25.0" + "version": "==26.1" }, "pluggy": { "hashes": [ @@ -769,12 +781,12 @@ }, "psycopg": { "hashes": [ - "sha256:3e94bc5f4690247d734599af56e51bae8e0db8e4311ea413f801fef82b14a99b", - "sha256:707a67975ee214d200511177a6a80e56e654754c9afca06a7194ea6bbfde9ca7" + "sha256:5e9a47458b3c1583326513b2556a2a9473a1001a56c9efe9e587245b43148dd9", + "sha256:f96525a72bcfade6584ab17e89de415ff360748c766f0106959144dcbb38c698" ], "index": "pypi", "markers": "python_version >= '3.10'", - "version": "==3.3.2" + "version": "==3.3.3" }, "publicsuffix2": { "hashes": [ @@ -785,11 +797,11 @@ }, "pyasn1": { "hashes": [ - "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", - "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034" + "sha256:697a8ecd6d98891189184ca1fa05d1bb00e2f84b5977c481452050549c8a72cf", + "sha256:a80184d120f0864a52a073acc6fc642847d0be408e7c7252f31390c0f4eadcde" ], "markers": "python_version >= '3.8'", - "version": "==0.6.1" + "version": "==0.6.3" }, "pyasn1-modules": { "hashes": [ @@ -801,53 +813,53 @@ }, "pycparser": { "hashes": [ - "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", - "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934" + "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29", + "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992" ], - "markers": "python_version >= '3.8'", - "version": "==2.23" + "markers": "python_version >= '3.10'", + "version": "==3.0" }, "pygments": { "hashes": [ - "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", - "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b" + "sha256:6757cd03768053ff99f3039c1a36d6c0aa0b263438fcab17520b30a303a82b5f", + "sha256:81a9e26dd42fd28a23a2d169d86d7ac03b46e2f8b59ed4698fb4785f946d0176" ], - "markers": "python_version >= '3.8'", - "version": "==2.19.2" + "markers": "python_version >= '3.9'", + "version": "==2.20.0" }, "pylsqpack": { "hashes": [ - "sha256:2d91d87672beb0beff6a866dbf35e8b45791d8dffcd5cfd9d8cc397001101fd5", - "sha256:2f9a2ef59588d32cd02847c6b9d7140440f67a0751da99f96a2ff4edadc85eae", - "sha256:3f7d78352e764732ac1a9ab109aa84e003996a7d64de7098cb20bdc007cf7613", - "sha256:498b374b16b51532997998c4cf4021161d2a611f5ea6b02ad95ca99815c54abf", - "sha256:4e5b0b5ec92be6e5e6eb1c52d45271c5c7f8f2a2cd8c672ab240ac2cd893cd26", - "sha256:829a2466b80af9766cf0ad795b866796a4000cec441a0eb222357efd01ec6d42", - "sha256:8a9e25c5a98a0959c6511aaf7d1a6ac0d6146be349a8c3c09fec2e5250cb2901", - "sha256:8ba86c384dcf8952cef190f8cc4d61cb2a8e4eeaf25093c6aa38b9b696ac82dc", - "sha256:978497811bb58cf7ae11c0e1d4cf9bdf6bccef77556d039ae1836b458cb235fc", - "sha256:b516d56078a16592596ea450ea20e9a54650af759754e2e807b7046be13c83ee", - "sha256:db03232c85855cb03226447e41539f8631d7d4e5483d48206e30d470a9cb07a1", - "sha256:f55b126940d8b3157331f123d4428d703a698a6db65a6a7891f7ec1b90c86c56" + "sha256:23b4d8af48836893beac356c10ca268161953de5bf9ed691526a93f5c82433e9", + "sha256:54978a9879471596d84bbad5e67d727014048926bc5bb2dac0eb3701b48c5ac9", + "sha256:6024854eb16d32803d4890fb90a73b9348c74b61c0770680aefaaa75f8456e8c", + "sha256:8da12be7b35b7c9a8cf73a4c077f72e5022a311f80a401c79904213376f2d767", + "sha256:8ec455f44614228f89e38d40c1b1e37895620e20ec6b21e3b562fa8b79a23890", + "sha256:8edf48d0a023cd3629b2c4aaccac9b79a46d566c0f61e7416b5678228433763d", + 
"sha256:b6a8bb42127d5ece8d301a673c8205df25b73b69f8c46b9f0c3034588de1789a", + "sha256:c3e2327af25ee616ce4483a8748f0957cf017cbca82d58ed15efea68f70f94ff", + "sha256:caf63ddc2e581c764d17432893acce02c5c29ff879d77c2abf1e26aa4eeb831b", + "sha256:e3dc5f146fd456b50b227858aed59faa0ff8445aa426e69bb4e50d46c487aab0", + "sha256:e3f977d419c60c1d6c2240e6d7a52df820d37eb8c36b4057113bcd7859f53e2c", + "sha256:e7d956dbc8f7d597b237b9157d0a16bc7c655a1b031239763c18dc8582aff8cc" ], "markers": "python_version >= '3.10'", - "version": "==0.3.23" + "version": "==0.3.24" }, "pyopenssl": { "hashes": [ - "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab", - "sha256:8d031884482e0c67ee92bf9a4d8cceb08d92aba7136432ffb0703c5280fc205b" + "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6", + "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329" ], "markers": "python_version >= '3.7'", - "version": "==25.1.0" + "version": "==25.3.0" }, "pyparsing": { "hashes": [ - "sha256:2df8d5b7b2802ef88e8d016a2eb9c7aeaa923529cd251ed0fe4608275d4105b6", - "sha256:e38a4f02064cf41fe6593d328d0512495ad1f3d8a91c4f73fc401b3079a59a5e" + "sha256:850ba148bd908d7e2411587e247a1e4f0327839c40e2e5e6d05a007ecc69911d", + "sha256:c777f4d763f140633dcb6d8a3eda953bf7a214dc4eff598413c070bcdc117cbc" ], "markers": "python_version >= '3.9'", - "version": "==3.2.5" + "version": "==3.3.2" }, "pyperclip": { "hashes": [ @@ -858,12 +870,12 @@ }, "pytest": { "hashes": [ - "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", - "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11" + "sha256:2c5efc453d45394fdd706ade797c0a81091eccd1d6e4bccfcd476e2b8e0ab5d9", + "sha256:b86ada508af81d19edeb213c681b1d48246c1a91d304c6c81a427674c17eb91c" ], "index": "pypi", "markers": "python_version >= '3.10'", - "version": "==9.0.2" + "version": "==9.0.3" }, "pytest-asyncio": { "hashes": [ @@ -983,78 +995,11 @@ }, "ruamel.yaml": { "hashes": [ - "sha256:048f26d64245bae57a4f9ef6feb5b552a386830ef7a826f235ffb804c59efbba", - "sha256:a6e587512f3c998b2225d68aa1f35111c29fad14aed561a26e73fab729ec5e5a" - ], - "markers": "python_version >= '3.8'", - "version": "==0.18.16" - }, - "ruamel.yaml.clib": { - "hashes": [ - "sha256:014181cdec565c8745b7cbc4de3bf2cc8ced05183d986e6d1200168e5bb59490", - "sha256:04d21dc9c57d9608225da28285900762befbb0165ae48482c15d8d4989d4af14", - "sha256:05c70f7f86be6f7bee53794d80050a28ae7e13e4a0087c1839dcdefd68eb36b6", - "sha256:0ba6604bbc3dfcef844631932d06a1a4dcac3fee904efccf582261948431628a", - "sha256:11e5499db1ccbc7f4b41f0565e4f799d863ea720e01d3e99fa0b7b5fcd7802c9", - "sha256:1b45498cc81a4724a2d42273d6cfc243c0547ad7c6b87b4f774cb7bcc131c98d", - "sha256:1bb7b728fd9f405aa00b4a0b17ba3f3b810d0ccc5f77f7373162e9b5f0ff75d5", - "sha256:1f66f600833af58bea694d5892453f2270695b92200280ee8c625ec5a477eed3", - "sha256:27dc656e84396e6d687f97c6e65fb284d100483628f02d95464fd731743a4afe", - "sha256:2812ff359ec1f30129b62372e5f22a52936fac13d5d21e70373dbca5d64bb97c", - "sha256:2b216904750889133d9222b7b873c199d48ecbb12912aca78970f84a5aa1a4bc", - "sha256:331fb180858dd8534f0e61aa243b944f25e73a4dae9962bd44c46d1761126bbf", - "sha256:3cb75a3c14f1d6c3c2a94631e362802f70e83e20d1f2b2ef3026c05b415c4900", - "sha256:3eb199178b08956e5be6288ee0b05b2fb0b5c1f309725ad25d9c6ea7e27f962a", - "sha256:424ead8cef3939d690c4b5c85ef5b52155a231ff8b252961b6516ed7cf05f6aa", - "sha256:45702dfbea1420ba3450bb3dd9a80b33f0badd57539c6aac09f42584303e0db6", - "sha256:468858e5cbde0198337e6a2a78eda8c3fb148bdf4c6498eaf4bc9ba3f8e780bd", - 
"sha256:46895c17ead5e22bea5e576f1db7e41cb273e8d062c04a6a49013d9f60996c25", - "sha256:46e4cc8c43ef6a94885f72512094e482114a8a706d3c555a34ed4b0d20200600", - "sha256:480894aee0b29752560a9de46c0e5f84a82602f2bc5c6cde8db9a345319acfdf", - "sha256:4b293a37dc97e2b1e8a1aec62792d1e52027087c8eea4fc7b5abd2bdafdd6642", - "sha256:4be366220090d7c3424ac2b71c90d1044ea34fca8c0b88f250064fd06087e614", - "sha256:4d1032919280ebc04a80e4fb1e93f7a738129857eaec9448310e638c8bccefcf", - "sha256:4d3b58ab2454b4747442ac76fab66739c72b1e2bb9bd173d7694b9f9dbc9c000", - "sha256:4dcec721fddbb62e60c2801ba08c87010bd6b700054a09998c4d09c08147b8fb", - "sha256:512571ad41bba04eac7268fe33f7f4742210ca26a81fe0c75357fa682636c690", - "sha256:542d77b72786a35563f97069b9379ce762944e67055bea293480f7734b2c7e5e", - "sha256:56ea19c157ed8c74b6be51b5fa1c3aff6e289a041575f0556f66e5fb848bb137", - "sha256:5d3c9210219cbc0f22706f19b154c9a798ff65a6beeafbf77fc9c057ec806f7d", - "sha256:5fea0932358e18293407feb921d4f4457db837b67ec1837f87074667449f9401", - "sha256:617d35dc765715fa86f8c3ccdae1e4229055832c452d4ec20856136acc75053f", - "sha256:64da03cbe93c1e91af133f5bec37fd24d0d4ba2418eaf970d7166b0a26a148a2", - "sha256:65f48245279f9bb301d1276f9679b82e4c080a1ae25e679f682ac62446fac471", - "sha256:6f1d38cbe622039d111b69e9ca945e7e3efebb30ba998867908773183357f3ed", - "sha256:713cd68af9dfbe0bb588e144a61aad8dcc00ef92a82d2e87183ca662d242f524", - "sha256:71845d377c7a47afc6592aacfea738cc8a7e876d586dfba814501d8c53c1ba60", - "sha256:753faf20b3a5906faf1fc50e4ddb8c074cb9b251e00b14c18b28492f933ac8ef", - "sha256:7e74ea87307303ba91073b63e67f2c667e93f05a8c63079ee5b7a5c8d0d7b043", - "sha256:88eea8baf72f0ccf232c22124d122a7f26e8a24110a0273d9bcddcb0f7e1fa03", - "sha256:923816815974425fbb1f1bf57e85eca6e14d8adc313c66db21c094927ad01815", - "sha256:9b6f7d74d094d1f3a4e157278da97752f16ee230080ae331fcc219056ca54f77", - "sha256:a8220fd4c6f98485e97aea65e1df76d4fed1678ede1fe1d0eed2957230d287c4", - "sha256:ab0df0648d86a7ecbd9c632e8f8d6b21bb21b5fc9d9e095c796cacf32a728d2d", - "sha256:ac9b8d5fa4bb7fd2917ab5027f60d4234345fd366fe39aa711d5dca090aa1467", - "sha256:badd1d7283f3e5894779a6ea8944cc765138b96804496c91812b2829f70e18a7", - "sha256:bdc06ad71173b915167702f55d0f3f027fc61abd975bd308a0968c02db4a4c3e", - "sha256:bf0846d629e160223805db9fe8cc7aec16aaa11a07310c50c8c7164efa440aec", - "sha256:bfd309b316228acecfa30670c3887dcedf9b7a44ea39e2101e75d2654522acd4", - "sha256:c583229f336682b7212a43d2fa32c30e643d3076178fb9f7a6a14dde85a2d8bd", - "sha256:cb15a2e2a90c8475df45c0949793af1ff413acfb0a716b8b94e488ea95ce7cff", - "sha256:d290eda8f6ada19e1771b54e5706b8f9807e6bb08e873900d5ba114ced13e02c", - "sha256:da3d6adadcf55a93c214d23941aef4abfd45652110aed6580e814152f385b862", - "sha256:dcc7f3162d3711fd5d52e2267e44636e3e566d1e5675a5f0b30e98f2c4af7974", - "sha256:def5663361f6771b18646620fca12968aae730132e104688766cf8a3b1d65922", - "sha256:e5e9f630c73a490b758bf14d859a39f375e6999aea5ddd2e2e9da89b9953486a", - "sha256:e9fde97ecb7bb9c41261c2ce0da10323e9227555c674989f8d9eb7572fc2098d", - "sha256:ef71831bd61fbdb7aa0399d5c4da06bea37107ab5c79ff884cc07f2450910262", - "sha256:f4421ab780c37210a07d138e56dd4b51f8642187cdfb433eb687fe8c11de0144", - "sha256:f6d3655e95a80325b84c4e14c080b2470fe4f33b6846f288379ce36154993fb1", - "sha256:fd4c928ddf6bce586285daa6d90680b9c291cfd045fc40aad34e445d57b1bf51", - "sha256:fe239bdfdae2302e93bd6e8264bd9b71290218fff7084a9db250b55caaccf43f" + "sha256:27592957fedf6e0b62f281e96effd28043345e0e66001f97683aa9a40c667c93", + "sha256:53eb66cd27849eff968ebf8f0bf61f46cdac2da1d1f3576dd4ccee9b25c31993" ], "markers": 
"python_version >= '3.9'", - "version": "==0.2.15" + "version": "==0.19.1" }, "service-identity": { "hashes": [ @@ -1073,22 +1018,20 @@ }, "tornado": { "hashes": [ - "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c", - "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6", - "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef", - "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4", - "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0", - "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e", - "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882", - "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04", - "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0", - "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af", - "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f", - "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108" + "sha256:192b8f3ea91bd7f1f50c06955416ed76c6b72f96779b962f07f911b91e8d30e9", + "sha256:2c9a876e094109333f888539ddb2de4361743e5d21eece20688e3e351e4990a6", + "sha256:36abed1754faeb80fbd6e64db2758091e1320f6bba74a4cf8c09cd18ccce8aca", + "sha256:3f54aa540bdbfee7b9eb268ead60e7d199de5021facd276819c193c0fb28ea4e", + "sha256:435319e9e340276428bbdb4e7fa732c2d399386d1de5686cb331ec8eee754f07", + "sha256:487dc9cc380e29f58c7ab88f9e27cdeef04b2140862e5076a66fb6bb68bb1bfa", + "sha256:6443a794ba961a9f619b1ae926a2e900ac20c34483eea67be4ed8f1e58d3ef7b", + "sha256:65a7f1d46d4bb41df1ac99f5fcb685fb25c7e61613742d5108b010975a9a6521", + "sha256:dd3eafaaeec1c7f2f8fdcd5f964e8907ad788fe8a5a32c4426fbbdda621223b7", + "sha256:e74c92e8e65086b338fd56333fb9a68b9f6f2fe7ad532645a290a464bcf46be5" ], "index": "pypi", "markers": "python_version >= '3.9'", - "version": "==6.5.2" + "version": "==6.5.5" }, "typing-extensions": { "hashes": [ @@ -1101,36 +1044,36 @@ }, "urwid": { "hashes": [ - "sha256:300804dd568cda5aa1c5b204227bd0cfe7a62cef2d00987c5eb2e4e64294ed9b", - "sha256:ede36ecc99a293bbb4b5e5072c7b7bb943eb3bed17decf89b808209ed2dead15" + "sha256:24be27ffafdb68c09cd95dc21b60ccfd02843320b25ce5feee1708b34fad5a23", + "sha256:f188144261224fdfc9b56b4222869bd0eac90fd7895cf1e376129cdc7e13bc84" ], "markers": "python_full_version >= '3.9.0'", - "version": "==3.0.3" + "version": "==3.0.5" }, "wcwidth": { "hashes": [ - "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", - "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1" + "sha256:1a3a1e510b553315f8e146c54764f4fb6264ffad731b3d78088cdb1478ffbdad", + "sha256:cdc4e4262d6ef9a1a57e018384cbeb1208d8abbc64176027e2c2455c81313159" ], - "markers": "python_version >= '3.6'", - "version": "==0.2.14" + "markers": "python_version >= '3.8'", + "version": "==0.6.0" }, "werkzeug": { "hashes": [ - "sha256:2ad50fb9ed09cc3af22c54698351027ace879a0b60a3b5edf5730b2f7d876905", - "sha256:cd3cd98b1b92dc3b7b3995038826c68097dcb16f9baa63abe35f20eafeb9fe5e" + "sha256:210c6bede5a420a913956b4791a7f4d6843a43b6fcee4dfa08a65e93007d0d25", + "sha256:7ddf3357bb9564e407607f988f683d72038551200c704012bb9a4c523d42f131" ], "index": "pypi", "markers": "python_version >= '3.9'", - "version": "==3.1.4" + "version": "==3.1.6" }, "wsproto": { "hashes": [ - "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065", - "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736" + 
"sha256:61eea322cdf56e8cc904bd3ad7573359a242ba65688716b0710a5eb12beab584", + "sha256:b86885dcf294e15204919950f666e06ffc6c7c114ca900b060d6e16293528294" ], - "markers": "python_full_version >= '3.7.0'", - "version": "==1.2.0" + "markers": "python_version >= '3.10'", + "version": "==1.3.2" }, "zstandard": { "hashes": [ @@ -1242,48 +1185,53 @@ "develop": { "attrs": { "hashes": [ - "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", - "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373" + "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309", + "sha256:d03ceb89cb322a8fd706d4fb91940737b6642aa36998fe130a9bc96c985eff32" ], "markers": "python_version >= '3.9'", - "version": "==25.4.0" + "version": "==26.1.0" }, "black": { "hashes": [ - "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f", - "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd", - "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea", - "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981", - "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b", - "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7", - "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8", - "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175", - "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d", - "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392", - "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad", - "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f", - "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f", - "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b", - "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875", - "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3", - "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800", - "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65", - "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2", - "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812", - "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50", - "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e" + "sha256:0126ae5b7c09957da2bdbd91a9ba1207453feada9e9fe51992848658c6c8e01c", + "sha256:0f76ff19ec5297dd8e66eb64deda23631e642c9393ab592826fd4bdc97a4bce7", + "sha256:28ef38aee69e4b12fda8dba75e21f9b4f979b490c8ac0baa7cb505369ac9e1ff", + "sha256:2bd5aa94fc267d38bb21a70d7410a89f1a1d318841855f698746f8e7f51acd1b", + "sha256:2c50f5063a9641c7eed7795014ba37b0f5fa227f3d408b968936e24bc0566b07", + "sha256:2d6bfaf7fd0993b420bed691f20f9492d53ce9a2bcccea4b797d34e947318a78", + "sha256:41cd2012d35b47d589cb8a16faf8a32ef7a336f56356babd9fcf70939ad1897f", + "sha256:474c27574d6d7037c1bc875a81d9be0a9a4f9ee95e62800dab3cfaadbf75acd5", + "sha256:5602bdb96d52d2d0672f24f6ffe5218795736dd34807fd0fd55ccd6bf206168b", + "sha256:5e9d0d86df21f2e1677cc4bd090cd0e446278bcbbe49bf3659c308c3e402843e", + "sha256:5ed0ca58586c8d9a487352a96b15272b7fa55d139fc8496b519e78023a8dab0a", + "sha256:6c54a4a82e291a1fee5137371ab488866b7c86a3305af4026bdd4dc78642e1ac", + "sha256:6e131579c243c98f35bce64a7e08e87fb2d610544754675d4a0e73a070a5aa3a", + 
"sha256:855822d90f884905362f602880ed8b5df1b7e3ee7d0db2502d4388a954cc8c54", + "sha256:86a8b5035fce64f5dcd1b794cf8ec4d31fe458cf6ce3986a30deb434df82a1d2", + "sha256:8a33d657f3276328ce00e4d37fe70361e1ec7614da5d7b6e78de5426cb56332f", + "sha256:92c0ec1f2cc149551a2b7b47efc32c866406b6891b0ee4625e95967c8f4acfb1", + "sha256:9a5e9f45e5d5e1c5b5c29b3bd4265dcc90e8b92cf4534520896ed77f791f4da5", + "sha256:afc622538b430aa4c8c853f7f63bc582b3b8030fd8c80b70fb5fa5b834e575c2", + "sha256:b07fc0dab849d24a80a29cfab8d8a19187d1c4685d8a5e6385a5ce323c1f015f", + "sha256:b5e6f89631eb88a7302d416594a32faeee9fb8fb848290da9d0a5f2903519fc1", + "sha256:bf9bf162ed91a26f1adba8efda0b573bc6924ec1408a52cc6f82cb73ec2b142c", + "sha256:c7e72339f841b5a237ff14f7d3880ddd0fc7f98a1199e8c4327f9a4f478c1839", + "sha256:ddb113db38838eb9f043623ba274cfaf7d51d5b0c22ecb30afe58b1bb8322983", + "sha256:dfdd51fc3e64ea4f35873d1b3fb25326773d55d2329ff8449139ebaad7357efb", + "sha256:f1cd08e99d2f9317292a311dfe578fd2a24b15dbce97792f9c4d752275c1fa56", + "sha256:f89f2ab047c76a9c03f78d0d66ca519e389519902fa27e7a91117ef7611c0568" ], "index": "pypi", - "markers": "python_version >= '3.9'", - "version": "==24.10.0" + "markers": "python_version >= '3.10'", + "version": "==26.3.1" }, "click": { "hashes": [ - "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", - "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6" + "sha256:14162b8b3b3550a7d479eafa77dfd3c38d9dc8951f6f69c78913a8f9a7540fd5", + "sha256:1924d2c27c5653561cd2cae4548d1406039cb79b858b747cfea24924bbc1616d" ], "markers": "python_version >= '3.10'", - "version": "==8.3.1" + "version": "==8.3.2" }, "flake8": { "hashes": [ @@ -1305,12 +1253,12 @@ }, "isort": { "hashes": [ - "sha256:1bcabac8bc3c36c7fb7b98a76c8abb18e0f841a3ba81decac7691008592499c1", - "sha256:5513527951aadb3ac4292a41a16cbc50dd1642432f5e8c20057d414bdafb4187" + "sha256:171ac4ff559cdc060bcfff550bc8404a486fee0caab245679c2abe7cb253c78d", + "sha256:28b89bc70f751b559aeca209e6120393d43fbe2490de0559662be7a9787e3d75" ], "index": "pypi", "markers": "python_full_version >= '3.10.0'", - "version": "==7.0.0" + "version": "==8.0.1" }, "mccabe": { "hashes": [ @@ -1330,27 +1278,27 @@ }, "packaging": { "hashes": [ - "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", - "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f" + "sha256:5d9c0669c6285e491e0ced2eee587eaf67b670d94a19e94e3984a481aba6802f", + "sha256:f042152b681c4bfac5cae2742a55e103d27ab2ec0f3d88037136b6bfe7c9c5de" ], "markers": "python_version >= '3.8'", - "version": "==25.0" + "version": "==26.1" }, "pathspec": { "hashes": [ - "sha256:62f8558917908d237d399b9b338ef455a814801a4688bc41074b25feefd93472", - "sha256:fa32b1eb775ed9ba8d599b22c5f906dc098113989da2c00bf8b210078ca7fb92" + "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", + "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723" ], "markers": "python_version >= '3.9'", - "version": "==1.0.2" + "version": "==1.0.4" }, "platformdirs": { "hashes": [ - "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", - "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31" + "sha256:3bfa75b0ad0db84096ae777218481852c0ebc6c727b3168c1b9e0118e458cf0a", + "sha256:e61adb1d5e5cb3441b4b7710bea7e4c12250ca49439228cc1021c00dcfac0917" ], "markers": "python_version >= '3.10'", - "version": "==4.5.1" + "version": "==4.9.6" }, "pycodestyle": { "hashes": [ @@ -1367,6 +1315,54 @@ ], "markers": "python_version >= 
'3.9'", "version": "==3.4.0" + }, + "pytokens": { + "hashes": [ + "sha256:0fc71786e629cef478cbf29d7ea1923299181d0699dbe7c3c0f4a583811d9fc1", + "sha256:11edda0942da80ff58c4408407616a310adecae1ddd22eef8c692fe266fa5009", + "sha256:140709331e846b728475786df8aeb27d24f48cbcf7bcd449f8de75cae7a45083", + "sha256:24afde1f53d95348b5a0eb19488661147285ca4dd7ed752bbc3e1c6242a304d1", + "sha256:26cef14744a8385f35d0e095dc8b3a7583f6c953c2e3d269c7f82484bf5ad2de", + "sha256:27b83ad28825978742beef057bfe406ad6ed524b2d28c252c5de7b4a6dd48fa2", + "sha256:292052fe80923aae2260c073f822ceba21f3872ced9a68bb7953b348e561179a", + "sha256:29d1d8fb1030af4d231789959f21821ab6325e463f0503a61d204343c9b355d1", + "sha256:2a44ed93ea23415c54f3face3b65ef2b844d96aeb3455b8a69b3df6beab6acc5", + "sha256:30f51edd9bb7f85c748979384165601d028b84f7bd13fe14d3e065304093916a", + "sha256:34bcc734bd2f2d5fe3b34e7b3c0116bfb2397f2d9666139988e7a3eb5f7400e3", + "sha256:3ad72b851e781478366288743198101e5eb34a414f1d5627cdd585ca3b25f1db", + "sha256:3f901fe783e06e48e8cbdc82d631fca8f118333798193e026a50ce1b3757ea68", + "sha256:42f144f3aafa5d92bad964d471a581651e28b24434d184871bd02e3a0d956037", + "sha256:4a14d5f5fc78ce85e426aa159489e2d5961acf0e47575e08f35584009178e321", + "sha256:4a58d057208cb9075c144950d789511220b07636dd2e4708d5645d24de666bdc", + "sha256:4e691d7f5186bd2842c14813f79f8884bb03f5995f0575272009982c5ac6c0f7", + "sha256:5502408cab1cb18e128570f8d598981c68a50d0cbd7c61312a90507cd3a1276f", + "sha256:584c80c24b078eec1e227079d56dc22ff755e0ba8654d8383b2c549107528918", + "sha256:5ad948d085ed6c16413eb5fec6b3e02fa00dc29a2534f088d3302c47eb59adf9", + "sha256:670d286910b531c7b7e3c0b453fd8156f250adb140146d234a82219459b9640c", + "sha256:682fa37ff4d8e95f7df6fe6fe6a431e8ed8e788023c6bcc0f0880a12eab80ad1", + "sha256:6d6c4268598f762bc8e91f5dbf2ab2f61f7b95bdc07953b602db879b3c8c18e1", + "sha256:79fc6b8699564e1f9b521582c35435f1bd32dd06822322ec44afdeba666d8cb3", + "sha256:8bdb9d0ce90cbf99c525e75a2fa415144fd570a1ba987380190e8b786bc6ef9b", + "sha256:8fcb9ba3709ff77e77f1c7022ff11d13553f3c30299a9fe246a166903e9091eb", + "sha256:941d4343bf27b605e9213b26bfa1c4bf197c9c599a9627eb7305b0defcfe40c1", + "sha256:967cf6e3fd4adf7de8fc73cd3043754ae79c36475c1c11d514fc72cf5490094a", + "sha256:970b08dd6b86058b6dc07efe9e98414f5102974716232d10f32ff39701e841c4", + "sha256:97f50fd18543be72da51dd505e2ed20d2228c74e0464e4262e4899797803d7fa", + "sha256:9bd7d7f544d362576be74f9d5901a22f317efc20046efe2034dced238cbbfe78", + "sha256:add8bf86b71a5d9fb5b89f023a80b791e04fba57960aa790cc6125f7f1d39dfe", + "sha256:b35d7e5ad269804f6697727702da3c517bb8a5228afa450ab0fa787732055fc9", + "sha256:b49750419d300e2b5a3813cf229d4e5a4c728dae470bcc89867a9ad6f25a722d", + "sha256:d31b97b3de0f61571a124a00ffe9a81fb9939146c122c11060725bd5aea79975", + "sha256:d70e77c55ae8380c91c0c18dea05951482e263982911fc7410b1ffd1dadd3440", + "sha256:d9907d61f15bf7261d7e775bd5d7ee4d2930e04424bab1972591918497623a16", + "sha256:da5baeaf7116dced9c6bb76dc31ba04a2dc3695f3d9f74741d7910122b456edc", + "sha256:dc74c035f9bfca0255c1af77ddd2d6ae8419012805453e4b0e7513e17904545d", + "sha256:dcafc12c30dbaf1e2af0490978352e0c4041a7cde31f4f81435c2a5e8b9cabb6", + "sha256:ee44d0f85b803321710f9239f335aafe16553b39106384cef8e6de40cb4ef2f6", + "sha256:f66a6bbe741bd431f6d741e617e0f39ec7257ca1f89089593479347cc4d13324" + ], + "markers": "python_version >= '3.8'", + "version": "==0.4.1" } } } diff --git a/.github/actions/save_logs_and_results/action.yml b/.github/actions/save_logs_and_results/action.yml index b344c68f2ef..0a5d6a6d9d6 100644 --- 
+++ b/.github/actions/save_logs_and_results/action.yml
@@ -6,7 +6,7 @@ inputs:
 runs:
   using: composite
   steps:
-    - uses: actions/upload-artifact@v4.6.0
+    - uses: actions/upload-artifact@v5
       name: Upload logs
       with:
         name: ${{ inputs.folder }}
diff --git a/.github/actions/setup_extension/action.yml b/.github/actions/setup_extension/action.yml
index 33129f17de6..18fd8fe441e 100644
--- a/.github/actions/setup_extension/action.yml
+++ b/.github/actions/setup_extension/action.yml
@@ -17,7 +17,7 @@ runs:
           echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
         fi
       shell: bash
-    - uses: actions/download-artifact@v4.1.8
+    - uses: actions/download-artifact@v5
       with:
        name: build-${{ env.PG_MAJOR }}
    - name: Install Extension
diff --git a/.github/actions/upload_coverage/action.yml b/.github/actions/upload_coverage/action.yml
index 784cb2a7654..880dfcca2f1 100644
--- a/.github/actions/upload_coverage/action.yml
+++ b/.github/actions/upload_coverage/action.yml
@@ -7,7 +7,7 @@ inputs:
 runs:
   using: composite
   steps:
-    - uses: codecov/codecov-action@v3
+    - uses: codecov/codecov-action@v5
      with:
        flags: ${{ inputs.flags }}
        token: ${{ inputs.codecov_token }}
diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index 4a46a282e76..0c61a1f15e4 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -3,6 +3,10 @@ run-name: Build & Test - ${{ github.event.pull_request.title || github.ref_name
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
   cancel-in-progress: true
+env:
+  # Opt JavaScript actions (e.g. actions/checkout@v5, actions/upload-artifact@v5)
+  # into Node.js 24 early to silence the Node 20 deprecation warning.
+  FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
 on:
   workflow_dispatch:
     inputs:
@@ -32,7 +36,7 @@ jobs:
       style_checker_image_name: "ghcr.io/citusdata/stylechecker"
       style_checker_tools_version: "0.8.33"
       sql_snapshot_pg_version: "18.3"
-      image_suffix: "-vac4338a"
+      image_suffix: "-v4271d84"
       pg16_version: '{ "major": "16", "full": "16.13" }'
       pg17_version: '{ "major": "17", "full": "17.9" }'
       pg18_version: '{ "major": "18", "full": "18.3" }'
@@ -48,7 +52,7 @@
       image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }}
       options: --user root
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Check Snapshots
         run: |
           git config --global --add safe.directory ${GITHUB_WORKSPACE}
@@ -62,7 +66,7 @@
      - name: Check Snapshots
        run: |
          git config --global --add safe.directory ${GITHUB_WORKSPACE}
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Check C Style
@@ -118,14 +122,14 @@
       image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}"
       options: --user root
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Expose $PG_MAJOR to Github Env
         run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
         shell: bash
       - name: Build
         run: "./ci/build-citus.sh"
         shell: bash
-      - uses: actions/upload-artifact@v4.6.0
+      - uses: actions/upload-artifact@v5
         with:
           name: build-${{ env.PG_MAJOR }}
           path: |-
@@ -200,8 +204,8 @@
       make_targets: '["check-split", "check-multi", "check-multi-1", "check-isolation", "check-operations", "check-follower-cluster", "check-add-backup-node", "check-columnar", "check-columnar-isolation", "check-enterprise", "check-enterprise-isolation", "check-enterprise-isolation-logicalrep-1", "check-enterprise-isolation-logicalrep-2", "check-enterprise-isolation-logicalrep-3"]'
       image_suffix: ${{ needs.params.outputs.image_suffix }}
       image_name: ${{ needs.params.outputs.test_image_name }}
-      citus_libdir: "/opt/citus-versions/v14.0.0"
-      citus_libdir_label: "v14.0.0"
+      citus_libdir: "/opt/citus-versions/v14.0.1"
+      citus_libdir_label: "v14.0.1"
       n_1_mode: "all"
     secrets:
       codecov_token: ${{ secrets.CODECOV_TOKEN }}
@@ -238,8 +242,8 @@
       image_suffix: ${{ needs.params.outputs.image_suffix }}
       image_name: ${{ needs.params.outputs.test_image_name }}
       citus_version: "14.0-1"
-      citus_libdir: "/opt/citus-versions/v14.0.0"
-      citus_libdir_label: "v14.0.0"
+      citus_libdir: "/opt/citus-versions/v14.0.1"
+      citus_libdir_label: "v14.0.1"
       n_1_mode: "workeronly"
     secrets:
       codecov_token: ${{ secrets.CODECOV_TOKEN }}
@@ -258,8 +262,8 @@
       image_suffix: ${{ needs.params.outputs.image_suffix }}
       image_name: ${{ needs.params.outputs.test_image_name }}
       citus_version: "14.0-1"
-      citus_libdir: "/opt/citus-versions/v14.0.0"
-      citus_libdir_label: "v14.0.0"
+      citus_libdir: "/opt/citus-versions/v14.0.1"
+      citus_libdir_label: "v14.0.1"
       n_1_mode: "coordinatoronly"
     secrets:
       codecov_token: ${{ secrets.CODECOV_TOKEN }}
@@ -283,7 +287,7 @@
         - ${{ needs.params.outputs.pg18_version }}
         parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - uses: "./.github/actions/setup_extension"
      - name: Test arbitrary configs
        run: |-
@@ -335,7 +339,7 @@
       old_pg_major: ${{ matrix.old_pg_major }}
       new_pg_major: ${{ matrix.new_pg_major }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - uses: "./.github/actions/setup_extension"
         with:
env.old_pg_major }}" @@ -389,7 +393,7 @@ jobs: - ${{ needs.params.outputs.pg16_version }} - ${{ needs.params.outputs.pg17_version }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: "./.github/actions/setup_extension" with: skip_installation: true @@ -433,12 +437,12 @@ jobs: needs: - build steps: - - uses: actions/checkout@v4 - - uses: azure/login@v1 + - uses: actions/checkout@v5 + - uses: azure/login@v2 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - name: install dependencies and run ch_benchmark tests - uses: azure/CLI@v1 + uses: azure/cli@v2 with: inlineScript: | cd ./src/test/hammerdb @@ -451,12 +455,12 @@ jobs: needs: - build steps: - - uses: actions/checkout@v4 - - uses: azure/login@v1 + - uses: actions/checkout@v5 + - uses: azure/login@v2 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - name: install dependencies and run tpcc_benchmark tests - uses: azure/CLI@v1 + uses: azure/cli@v2 with: inlineScript: | cd ./src/test/hammerdb @@ -470,7 +474,7 @@ jobs: outputs: json: ${{ steps.parallelization.outputs.json }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: "./.github/actions/parallelization" id: parallelization with: @@ -483,7 +487,7 @@ jobs: outputs: tests: ${{ steps.detect-regression-tests.outputs.tests }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - name: Detect regression tests need to be ran @@ -538,8 +542,8 @@ jobs: fail-fast: false matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }} steps: - - uses: actions/checkout@v4 - - uses: actions/download-artifact@v4.1.8 + - uses: actions/checkout@v5 + - uses: actions/download-artifact@v5 - uses: "./.github/actions/setup_extension" - name: Run minimal tests run: |- diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 320a57f4ddb..fd192b58baa 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,5 +1,8 @@ name: "CodeQL" +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + on: schedule: - cron: '59 23 * * 6' @@ -21,7 +24,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Initialize CodeQL uses: github/codeql-action/init@v4 diff --git a/.github/workflows/devcontainer.yml b/.github/workflows/devcontainer.yml index ebb855d3d89..7947baca8ec 100644 --- a/.github/workflows/devcontainer.yml +++ b/.github/workflows/devcontainer.yml @@ -7,6 +7,9 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + on: push: paths: @@ -34,7 +37,7 @@ jobs: type=sha - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: 'Login to GitHub Container Registry' uses: docker/login-action@v3 diff --git a/.github/workflows/flaky_test_debugging.yml b/.github/workflows/flaky_test_debugging.yml index e7e56215d4c..2208fca4fa3 100644 --- a/.github/workflows/flaky_test_debugging.yml +++ b/.github/workflows/flaky_test_debugging.yml @@ -3,6 +3,8 @@ run-name: Flaky test debugging - ${{ inputs.flaky_test }} (${{ inputs.flaky_test concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" on: workflow_dispatch: inputs: @@ -28,13 +30,13 @@ jobs: image: ${{ vars.build_image_name }}:${{ vars.pg16_version }}${{ vars.image_suffix }} options: --user root steps: - - uses: actions/checkout@v4 + - uses: 
+      - uses: actions/checkout@v5
       - name: Configure, Build, and Install
         run: |
           echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
           ./ci/build-citus.sh
         shell: bash
-      - uses: actions/upload-artifact@v4.6.0
+      - uses: actions/upload-artifact@v5
        with:
          name: build-${{ env.PG_MAJOR }}
          path: |-
@@ -46,7 +48,7 @@
     outputs:
       json: ${{ steps.parallelization.outputs.json }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - uses: "./.github/actions/parallelization"
        id: parallelization
        with:
@@ -67,7 +69,7 @@
       fail-fast: false
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - uses: "./.github/actions/setup_extension"
      - name: Run minimal tests
        run: |-
diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml
index 9e418d77c00..39659ac1552 100644
--- a/.github/workflows/packaging-test-pipelines.yml
+++ b/.github/workflows/packaging-test-pipelines.yml
@@ -1,5 +1,8 @@
 name: Build tests in packaging images
 
+env:
+  FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
+
 on:
   pull_request:
     types: [opened, reopened,synchronize]
@@ -19,7 +22,7 @@
       pg_versions: ${{ steps.get-postgres-versions.outputs.pg_versions }}
     steps:
     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v5
      with:
        fetch-depth: 2
    - name: Get Postgres Versions
@@ -58,7 +61,7 @@
 
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v4
+      uses: actions/checkout@v5
 
    - name: Set Postgres and python parameters for rpm based distros
      run: |
@@ -133,7 +136,7 @@
 
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v4
+      uses: actions/checkout@v5
 
    - name: Set pg_config path and python parameters for deb based distros
      run: |
diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml
index a88039c2017..5739222d4e2 100644
--- a/.github/workflows/run_tests.yml
+++ b/.github/workflows/run_tests.yml
@@ -1,4 +1,6 @@
 name: Run Tests
+env:
+  FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
 on:
   workflow_call:
     inputs:
@@ -55,7 +57,7 @@
       --cap-add=SYS_NICE
       --security-opt seccomp=unconfined
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - uses: "./.github/actions/setup_extension"
      - name: Fix PostgreSQL library permissions for symlink setup
        if: ${{ inputs.citus_libdir != '' }}
diff --git a/configure b/configure
index 2bffd71b57c..e98d8d9680a 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 14.0.1.
+# Generated by GNU Autoconf 2.69 for Citus 14.1.0.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -579,8 +579,8 @@ MAKEFLAGS=
 
 # Identity of this package.
 PACKAGE_NAME='Citus'
 PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='14.0.1'
-PACKAGE_STRING='Citus 14.0.1'
+PACKAGE_VERSION='14.1.0'
+PACKAGE_STRING='Citus 14.1.0'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''
 
@@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures Citus 14.0.1 to adapt to many kinds of systems.
+\`configure' configures Citus 14.1.0 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1324,7 +1324,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of Citus 14.0.1:";;
+     short | recursive ) echo "Configuration of Citus 14.1.0:";;
   esac
   cat <<\_ACEOF
 
@@ -1429,7 +1429,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-Citus configure 14.0.1
+Citus configure 14.1.0
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by Citus $as_me 14.0.1, which was
+It was created by Citus $as_me 14.1.0, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -5395,7 +5395,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by Citus $as_me 14.0.1, which was
+This file was extended by Citus $as_me 14.1.0, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -5457,7 +5457,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-Citus config.status 14.0.1
+Citus config.status 14.1.0
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
@@ -6162,4 +6162,3 @@ if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
   { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
 fi
-
diff --git a/configure.ac b/configure.ac
index ea504baeeec..0a46bc4a409 100644
--- a/configure.ac
+++ b/configure.ac
@@ -5,7 +5,7 @@
 # everyone needing autoconf installed, the resulting files are checked
 # into the SCM.
 
-AC_INIT([Citus], [14.0.1])
+AC_INIT([Citus], [14.1.0])
 AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
 
 # we'll need sed and awk for some of the version commands
diff --git a/src/backend/columnar/citus_columnar.control b/src/backend/columnar/citus_columnar.control
index 872831bd3b8..3785b52e543 100644
--- a/src/backend/columnar/citus_columnar.control
+++ b/src/backend/columnar/citus_columnar.control
@@ -1,6 +1,6 @@
 # Columnar extension
 comment = 'Citus Columnar extension'
-default_version = '14.0-1'
+default_version = '14.1-1'
 module_pathname = '$libdir/citus_columnar'
 relocatable = false
 schema = pg_catalog
diff --git a/src/backend/columnar/mod.c b/src/backend/columnar/mod.c
index 98c2151276e..f125b663a39 100644
--- a/src/backend/columnar/mod.c
+++ b/src/backend/columnar/mod.c
@@ -22,7 +22,7 @@
 
 
 #if PG_VERSION_NUM >= PG_VERSION_18
-PG_MODULE_MAGIC_EXT(.name = "citus_columnar", .version = "14.0.0");
+PG_MODULE_MAGIC_EXT(.name = "citus_columnar", .version = "14.1.0");
 #else
 PG_MODULE_MAGIC;
 #endif
diff --git a/src/backend/columnar/sql/citus_columnar--13.2-1--13.3-1.sql b/src/backend/columnar/sql/citus_columnar--13.2-1--13.3-1.sql
new file mode 100644
index 00000000000..5ea61269a60
--- /dev/null
+++ b/src/backend/columnar/sql/citus_columnar--13.2-1--13.3-1.sql
@@ -0,0 +1,2 @@
+-- citus_columnar--13.2-1--13.3-1
+-- bump version to 13.3-1
diff --git a/src/backend/columnar/sql/citus_columnar--14.0-1--14.1-1.sql b/src/backend/columnar/sql/citus_columnar--14.0-1--14.1-1.sql
new file mode 100644
index 00000000000..3f7049d35b5
--- /dev/null
+++ b/src/backend/columnar/sql/citus_columnar--14.0-1--14.1-1.sql
@@ -0,0 +1,2 @@
+-- citus_columnar--14.0-1--14.1-1
+-- bump version to 14.1-1
diff --git a/src/backend/columnar/sql/downgrades/citus_columnar--13.3-1--13.2-1.sql b/src/backend/columnar/sql/downgrades/citus_columnar--13.3-1--13.2-1.sql
new file mode 100644
index 00000000000..9861642db32
--- /dev/null
+++ b/src/backend/columnar/sql/downgrades/citus_columnar--13.3-1--13.2-1.sql
@@ -0,0 +1,2 @@
+-- citus_columnar--13.3-1--13.2-1
+-- downgrade version to 13.2-1
diff --git a/src/backend/columnar/sql/downgrades/citus_columnar--14.1-1--14.0-1.sql b/src/backend/columnar/sql/downgrades/citus_columnar--14.1-1--14.0-1.sql
new file mode 100644
index 00000000000..629a1445ba9
--- /dev/null
+++ b/src/backend/columnar/sql/downgrades/citus_columnar--14.1-1--14.0-1.sql
@@ -0,0 +1,2 @@
+-- citus_columnar--14.1-1--14.0-1
+-- downgrade version to 14.0-1
diff --git a/src/backend/distributed/README.md b/src/backend/distributed/README.md
index a0e9c980d54..a9e6b89f4e6 100644
--- a/src/backend/distributed/README.md
+++ b/src/backend/distributed/README.md
@@ -1819,13 +1819,13 @@ Each field in the struct is documented in the comments within the `DistributeObj
 
 - **Returning tasks for `preprocess` and `postprocess`**: Ensure that either `preprocess` or `postprocess` returns a list of "DDLJob"s. If both functions return non-empty lists, then you would get an assertion failure.
 
-- **Generic `preprocess` and `postprocess` methods**: The generic methods, `PreprocessAlterDistributedObjectStmt` and `PostprocessAlterDistributedObjectStmt`, serve as generic pre and post methods utilized for various statements. Both of these methods find application in distributed object operations.
+- **Generic `preprocess` and `postprocess` methods**: The generic methods, `PreprocessAlterDistributedObjectStmtFromCoordinator` and `PostprocessAlterDistributedObjectStmtFromCoordinator`, serve as generic pre and post methods utilized for various statements. Both of these methods find application in distributed object operations.
 
-  - The `PreprocessAlterDistributedObjectStmt` method carries out the following operations:
+  - The `PreprocessAlterDistributedObjectStmtFromCoordinator` method carries out the following operations:
     - Performs a qualification operation.
     - Deparses the statement and generates a task list.
 
-  - As for the `PostprocessAlterDistributedObjectStmt` method, it:
+  - As for the `PostprocessAlterDistributedObjectStmtFromCoordinator` method, it:
     - Invokes the `EnsureAllObjectDependenciesExistOnAllNodes` function to propagate missing dependencies, both on the coordinator and the worker.
 
 Before defining new `preprocess` or `postprocess` methods, it is advisable to assess whether the generic methods can be employed in your specific case.
diff --git a/src/backend/distributed/citus.control b/src/backend/distributed/citus.control
index 11531abbe5b..3ec96de6655 100644
--- a/src/backend/distributed/citus.control
+++ b/src/backend/distributed/citus.control
@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '14.0-1'
+default_version = '14.1-1'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog
diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c
index 433bb0fe386..b53b8cd1ac5 100644
--- a/src/backend/distributed/commands/alter_table.c
+++ b/src/backend/distributed/commands/alter_table.c
@@ -1525,7 +1525,9 @@ CreateCitusTableLike(TableConversionState *con)
			.colocateWithTableName = quote_qualified_identifier(con->schemaName,
																con->relationName)
		};
-		CreateSingleShardTable(con->newRelationId, colocationParam);
+		bool allowFromWorkers = false;
+		CreateSingleShardTable(con->newRelationId, colocationParam,
+							   allowFromWorkers);
	}
	else
	{
diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c
index a9e43b23cbc..0fc64bae064 100644
--- a/src/backend/distributed/commands/common.c
+++ b/src/backend/distributed/commands/common.c
@@ -31,6 +31,13 @@
 #include "distributed/worker_transaction.h"
 
 
+static List * PreprocessAlterDistributedObjectStmtInternal(Node *stmt,
+                                                           const char *queryString,
+                                                           ProcessUtilityContext
+                                                           processUtilityContext,
+                                                           bool allowFromWorkers);
+
+
 /*
  * PostprocessCreateDistributedObjectFromCatalogStmt is a common function that can be used
  * for most objects during their creation phase. After the creation has happened locally
@@ -104,9 +111,40 @@ PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryS
 
 
 /*
- * PreprocessAlterDistributedObjectStmt handles any updates to distributed objects by
- * creating the fully qualified sql to apply to all workers after checking all
- * predconditions that apply to propagating changes.
+ * PreprocessAlterDistributedObjectStmtFromCoordinator is a wrapper around
+ * PreprocessAlterDistributedObjectStmtInternal to be used when altering distributed
+ * objects that we allow altering only from the coordinator.
+ */ +List * +PreprocessAlterDistributedObjectStmtFromCoordinator(Node *stmt, const char *queryString, + ProcessUtilityContext + processUtilityContext) +{ + return PreprocessAlterDistributedObjectStmtInternal(stmt, queryString, + processUtilityContext, false); +} + + +/* + * PreprocessAlterDistributedObjectStmtFromAnyNode is a wrapper around + * PreprocessAlterDistributedObjectStmtInternal to be used when altering distributed + * objects that we allow altering from any node. + */ +List * +PreprocessAlterDistributedObjectStmtFromAnyNode(Node *stmt, + const char *queryString, + ProcessUtilityContext + processUtilityContext) +{ + return PreprocessAlterDistributedObjectStmtInternal(stmt, queryString, + processUtilityContext, true); +} + + +/* + * PreprocessAlterDistributedObjectStmtInternal handles any updates to distributed + * objects by creating the fully qualified sql to apply to all other nodes after checking + * all preconditions that apply to propagating changes. * * Preconditions are (in order): * - not in a CREATE/ALTER EXTENSION code block @@ -114,17 +152,18 @@ PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryS * - object being altered is distributed * - any object specific feature flag is turned on when a feature flag is available * - * Once we conclude to propagate the changes to the workers we make sure that the command - * has been executed on the coordinator and force any ongoing transaction to run in + * Once we conclude to propagate the changes to other nodes we make sure that the command + * has been executed on the local node and force any ongoing transaction to run in * sequential mode. If any of these steps fail we raise an error to inform the user. * * Lastly we recreate a fully qualified version of the original sql and prepare the tasks - * to send these sql commands to the workers. These tasks include instructions to prevent + * to send these sql commands to other nodes. These tasks include instructions to prevent * recursion of propagation with Citus' MX functionality. */ -List * -PreprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext) +static List * +PreprocessAlterDistributedObjectStmtInternal(Node *stmt, const char *queryString, + ProcessUtilityContext processUtilityContext, + bool allowFromWorkers) { const DistributeObjectOps *ops = GetDistributeObjectOps(stmt); Assert(ops != NULL); @@ -145,7 +184,15 @@ PreprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString, return NIL; } - EnsureCoordinator(); + if (allowFromWorkers) + { + EnsurePropagationToCoordinator(); + } + else + { + EnsureCoordinator(); + } + EnsureSequentialMode(ops->objectType); QualifyTreeNode(stmt); @@ -155,14 +202,14 @@ PreprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString, (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } /* - * PostprocessAlterDistributedObjectStmt is the counter part of - * PreprocessAlterDistributedObjectStmt that should be executed after the object has been - * changed locally. + * PostprocessAlterDistributedObjectStmtFromCoordinator is the counterpart of + * PreprocessAlterDistributedObjectStmtFromCoordinator that should be executed after the + * object has been changed locally. * * We perform the same precondition checks as before to skip this operation if any of the * failed during preprocessing.
Since we already raised an error on other checks we don't @@ -173,7 +220,7 @@ PreprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString, * they get created on the workers before we send the command list to the workers. */ List * -PostprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString) +PostprocessAlterDistributedObjectStmtFromCoordinator(Node *stmt, const char *queryString) { const DistributeObjectOps *ops = GetDistributeObjectOps(stmt); Assert(ops != NULL); diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 1093d5bf7ae..c04f5951aca 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -92,6 +92,7 @@ #include "distributed/shard_split.h" #include "distributed/shard_transfer.h" #include "distributed/shared_library_init.h" +#include "distributed/tenant_schema_metadata.h" #include "distributed/utils/distribution_column_map.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" @@ -142,11 +143,13 @@ static CitusTableParams DecideCitusTableParams(CitusTableType tableType, DistributedTableParams * distributedTableParams); static void CreateCitusTable(Oid relationId, CitusTableType tableType, - DistributedTableParams *distributedTableParams); + DistributedTableParams *distributedTableParams, + bool allowFromWorkers); static void ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType, DistributedTableParams * - distributedTableParams); + distributedTableParams, + bool allowFromWorkers); static void CreateHashDistributedTableShards(Oid relationId, int shardCount, Oid colocatedTableId, bool localTableEmpty); static void CreateSingleShardTableShard(Oid relationId, Oid colocatedTableId, @@ -163,13 +166,12 @@ static Oid SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId, int16 supportFunctionNumber); static void EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMethod); static bool ShouldLocalTableBeEmpty(Oid relationId, char distributionMethod); -static void EnsureCitusTableCanBeCreated(Oid relationOid); +static void EnsureCitusTableCanBeCreated(Oid relationOid, bool allowFromWorkers); static void PropagatePrerequisiteObjectsForDistributedTable(Oid relationId); static void EnsureDistributedSequencesHaveOneType(Oid relationId, List *seqInfoList); static void CopyLocalDataIntoShards(Oid distributedTableId); -static List * TupleDescColumnNameList(TupleDesc tupleDescriptor); - +static uint64 CopyFromLocalTableIntoDistTable(Oid localTableId, Oid distributedTableId); static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc, Var *distributionColumn); static int numeric_typmod_scale(int32 typmod); @@ -303,7 +305,9 @@ create_distributed_table(PG_FUNCTION_ARGS) .colocationParamType = COLOCATE_WITH_TABLE_LIKE_OPT, .colocateWithTableName = colocateWithTableName, }; - CreateSingleShardTable(relationId, colocationParam); + bool allowFromWorkers = false; + CreateSingleShardTable(relationId, colocationParam, + allowFromWorkers); } PG_RETURN_VOID(); @@ -417,7 +421,8 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName, DropOrphanedResourcesInSeparateTransaction(); - EnsureCitusTableCanBeCreated(relationId); + bool allowFromWorkers = false; + EnsureCitusTableCanBeCreated(relationId, allowFromWorkers); EnsureValidDistributionColumn(relationId, distributionColumnName); @@ -932,15 
+937,23 @@ create_reference_table(PG_FUNCTION_ARGS) /* * EnsureCitusTableCanBeCreated checks if - * - we are on the coordinator + * - we are on the coordinator if allowFromWorkers is false, or else that we can ensure propagation to the coordinator * - the current user is the owner of the table * - relation kind is supported * - relation is not a shard */ static void -EnsureCitusTableCanBeCreated(Oid relationOid) +EnsureCitusTableCanBeCreated(Oid relationOid, bool allowFromWorkers) { - EnsureCoordinator(); + if (allowFromWorkers) + { + EnsurePropagationToCoordinator(); + } + else + { + EnsureCoordinator(); + } + EnsureRelationExists(relationOid); EnsureTableOwner(relationOid); ErrorIfTemporaryTable(relationOid); @@ -1025,9 +1038,11 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName, }, .shardCount = shardCount, .shardCountIsStrict = shardCountIsStrict, - .distributionColumnName = distributionColumnName + .distributionColumnName = distributionColumnName, }; - CreateCitusTable(relationId, tableType, &distributedTableParams); + + bool allowFromWorkers = false; + CreateCitusTable(relationId, tableType, &distributedTableParams, allowFromWorkers); } @@ -1037,17 +1052,19 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName, void CreateReferenceTable(Oid relationId) { + bool allowFromWorkers = false; if (IsCitusTableType(relationId, CITUS_LOCAL_TABLE)) { /* * Create the shard of given Citus local table on workers to convert * it into a reference table. */ - ConvertCitusLocalTableToTableType(relationId, REFERENCE_TABLE, NULL); + ConvertCitusLocalTableToTableType(relationId, REFERENCE_TABLE, NULL, + allowFromWorkers); } else { - CreateCitusTable(relationId, REFERENCE_TABLE, NULL); + CreateCitusTable(relationId, REFERENCE_TABLE, NULL, allowFromWorkers); } } @@ -1057,28 +1074,31 @@ CreateReferenceTable(Oid relationId) * doesn't have a shard key. */ void -CreateSingleShardTable(Oid relationId, ColocationParam colocationParam) +CreateSingleShardTable(Oid relationId, ColocationParam colocationParam, + bool allowFromWorkers) { DistributedTableParams distributedTableParams = { .colocationParam = colocationParam, .shardCount = 1, .shardCountIsStrict = true, - .distributionColumnName = NULL + .distributionColumnName = NULL, }; if (IsCitusTableType(relationId, CITUS_LOCAL_TABLE)) { /* * Create the shard of given Citus local table on appropriate node - * and drop the local one to convert it into a single-shard distributed - * table. + * and drop the one on the coordinator to convert it into a + * single-shard distributed table.
*/ ConvertCitusLocalTableToTableType(relationId, SINGLE_SHARD_DISTRIBUTED, - &distributedTableParams); + &distributedTableParams, + allowFromWorkers); } else { - CreateCitusTable(relationId, SINGLE_SHARD_DISTRIBUTED, &distributedTableParams); + CreateCitusTable(relationId, SINGLE_SHARD_DISTRIBUTED, &distributedTableParams, + allowFromWorkers); } } @@ -1098,7 +1118,8 @@ CreateSingleShardTable(Oid relationId, ColocationParam colocationParam) */ static void CreateCitusTable(Oid relationId, CitusTableType tableType, - DistributedTableParams *distributedTableParams) + DistributedTableParams *distributedTableParams, + bool allowFromWorkers) { if ((tableType == HASH_DISTRIBUTED || tableType == APPEND_DISTRIBUTED || tableType == SINGLE_SHARD_DISTRIBUTED || @@ -1109,7 +1130,7 @@ CreateCitusTable(Oid relationId, CitusTableType tableType, "not be otherwise"))); } - EnsureCitusTableCanBeCreated(relationId); + EnsureCitusTableCanBeCreated(relationId, allowFromWorkers); /* allow creating a Citus table on an empty cluster */ InsertCoordinatorIfClusterEmpty(); @@ -1248,8 +1269,19 @@ CreateCitusTable(Oid relationId, CitusTableType tableType, * mutations happen on the colocation group with regards to its placements. It is * important that we have already copied any reference tables before acquiring this * lock as these are competing operations. + * + * If we're on a worker, we acquire the lock on the coordinator via the remote + * metadata connection to the coordinator. */ - LockColocationId(colocationId, ShareLock); + if (IsCoordinator()) + { + LockColocationId(colocationId, ShareLock); + } + else + { + char *command = LockColocationIdCommand(colocationId, ShareLock); + SendCommandToCoordinator(command); + } /* we need to calculate these variables before creating distributed metadata */ bool localTableEmpty = TableEmpty(relationId); @@ -1296,6 +1328,43 @@ CreateCitusTable(Oid relationId, CitusTableType tableType, SyncCitusTableMetadata(relationId); } + /* + * We need to adjust sequence ranges and the function calls + * used in column default expressions using nextval() on + * all workers, and we don't need to do that on the + * coordinator because it can always use the full range for + * the sequences and can use the user-provided column default + * expressions -that typically use nextval()-. And at this + * point all such work is already done for remote workers. + * + * Specifically, SyncCitusTableMetadata() handles + * the sequence ranges for identity column sequences and + * PropagatePrerequisiteObjectsForDistributedTable() handles + * the same for dependent sequences, i.e., sequences backing + * the serial-based columns or the ones backing the columns + * that look like ".. DEFAULT nextval('..') ...". + * SyncCitusTableMetadata() also adjusts the function calls + * for the columns using dependent sequences - note that we + * don't adjust the function calls for identity columns. + * + * For this reason, here we do the same on the local node + * only if it's not the coordinator, as we're only interested + * in doing this on workers. + */ + if (!IsCoordinator()) + { + AdjustDependentSeqRangesOnLocalWorker(relationId); + + /* + * Note that AdjustDependentSeqRangesOnLocalWorker() doesn't adjust + * sequence ranges for identity columns, so we need to adjust them + * separately here. + */ + AdjustIdentityColumnSeqRangesOnLocalWorker(relationId); + + AdjustNextValColumnDefaultsOnLocalWorker(relationId); + } + /* * We've a custom way of foreign key graph invalidation, * see InvalidateForeignKeyGraph().
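The hunk above introduces a coordinator-lock forwarding pattern that this patch repeats in several places (also in ConvertCitusLocalTableToTableType() and PreprocessDropTableStmt() below). As a reading aid, a minimal sketch of that pattern as a standalone helper, using only the calls that appear in the hunk (IsCoordinator(), LockColocationId(), LockColocationIdCommand(), SendCommandToCoordinator()); the helper name itself is hypothetical and not part of the patch:

```c
/*
 * Sketch only (assumes the surrounding Citus headers): take the colocation
 * lock directly when running on the coordinator; otherwise ask the
 * coordinator to take it on our behalf over the remote metadata connection.
 */
static void
AcquireColocationLockFromAnyNode(uint32 colocationId, LOCKMODE lockMode)
{
	if (IsCoordinator())
	{
		LockColocationId(colocationId, lockMode);
	}
	else
	{
		char *command = LockColocationIdCommand(colocationId, lockMode);
		SendCommandToCoordinator(command);
	}
}
```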
@@ -1336,7 +1405,8 @@ CreateCitusTable(Oid relationId, CitusTableType tableType, .distributionColumnName = distributedTableParams->distributionColumnName, }; CreateCitusTable(partitionRelationId, tableType, - &childDistributedTableParams); + &childDistributedTableParams, + allowFromWorkers); } MemoryContextSwitchTo(oldContext); @@ -1369,13 +1439,14 @@ CreateCitusTable(Oid relationId, CitusTableType tableType, * given table type. * * This only supports converting Citus local tables to reference tables - * (by replicating the shard to workers) and single-shard distributed - * tables (by replicating the shard to the appropriate worker and dropping - * the local one). + * (by replicating the coordinator placement to workers) and single-shard + * distributed tables (by replicating the coordinator placement to the + * appropriate worker and dropping the one on the coordinator). */ static void ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType, - DistributedTableParams *distributedTableParams) + DistributedTableParams *distributedTableParams, + bool allowFromWorkers) { if (!IsCitusTableType(relationId, CITUS_LOCAL_TABLE)) { @@ -1394,7 +1465,7 @@ ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType, "not be otherwise"))); } - EnsureCitusTableCanBeCreated(relationId); + EnsureCitusTableCanBeCreated(relationId, allowFromWorkers); Relation relation = try_relation_open(relationId, ExclusiveLock); if (relation == NULL) @@ -1459,20 +1530,42 @@ ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType, EnsureReferenceTablesExistOnAllNodes(); - LockColocationId(colocationId, ShareLock); + /* + * While adding tables to a colocation group we need to make sure no concurrent + * mutations happen on the colocation group with regards to its placements. It is + * important that we have already copied any reference tables before acquiring this + * lock as these are competing operations. + * + * If we're on a worker, we acquire the lock on the coordinator via the remote + * metadata connection to the coordinator. + */ + if (IsCoordinator()) + { + LockColocationId(colocationId, ShareLock); + } + else + { + char *command = LockColocationIdCommand(colocationId, ShareLock); + SendCommandToCoordinator(command); + } /* * When converting to a single shard table, we want to drop the placement * on the coordinator, but only if transferring to a different node. In that - * case, shouldDropLocalPlacement is true. When converting to a reference + * case, shouldDropCoordPlacement is true. When converting to a reference * table, we always keep the placement on the coordinator, so for reference - * tables shouldDropLocalPlacement is always false. + * tables shouldDropCoordPlacement is always false. */ - bool shouldDropLocalPlacement = false; + bool shouldDropCoordPlacement = false; List *targetNodeList = NIL; if (tableType == SINGLE_SHARD_DISTRIBUTED) { + /* + * Note that when run from a worker, SingleShardTableColocationNodeId() + * will acquire the lock on pg_dist_node via the coordinator as well, + * if needed. 
+ */ uint32 targetNodeId = SingleShardTableColocationNodeId(colocationId); if (targetNodeId != CoordinatorNodeIfAddedAsWorkerOrError()->nodeId) { @@ -1480,11 +1573,22 @@ ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType, WorkerNode *targetNode = FindNodeWithNodeId(targetNodeId, missingOk); targetNodeList = list_make1(targetNode); - shouldDropLocalPlacement = true; + shouldDropCoordPlacement = true; } } else if (tableType == REFERENCE_TABLE) { + /* + * If we're on a worker, first acquire the lock on the coordinator via + * the remote metadata connection to the coordinator as superuser. Fwiw, + * we'll acquire the lock on the local node as well via + * ActivePrimaryNonCoordinatorNodeList(). + */ + if (!IsCoordinator()) + { + LockPgDistNodeOnCoordinatorViaSuperUser(ShareLock); + } + targetNodeList = ActivePrimaryNonCoordinatorNodeList(ShareLock); targetNodeList = SortList(targetNodeList, CompareWorkerNodes); } @@ -1500,12 +1604,12 @@ ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType, NoneDistTableReplicateCoordinatorPlacement(relationId, targetNodeList); } - if (shouldDropLocalPlacement) + if (shouldDropCoordPlacement) { /* - * We don't yet drop the local placement before handling partitions. - * Otherewise, local shard placements of the partitions will be gone - * before we create them on workers. + * We don't yet drop the coordinator placement before handling partitions. + * Otherwise, coordinator shard placements of the partitions will be gone + * before we create them on remote nodes. * * However, we need to delete the related entry from pg_dist_placement * before distributing partitions (if any) because we need a sane metadata @@ -1550,14 +1654,15 @@ ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType, .distributionColumnName = distributedTableParams->distributionColumnName, }; ConvertCitusLocalTableToTableType(partitionRelationId, tableType, - &childDistributedTableParams); + &childDistributedTableParams, + allowFromWorkers); } MemoryContextSwitchTo(oldContext); MemoryContextDelete(citusPartitionContext); } - if (shouldDropLocalPlacement) + if (shouldDropCoordPlacement) { NoneDistTableDropCoordinatorPlacementTable(relationId); } @@ -1789,7 +1894,7 @@ EnsureDistributedSequencesHaveOneType(Oid relationId, List *seqInfoList) EnsureSequenceTypeSupported(sequenceOid, attributeTypeId, relationId); /* - * Alter the sequence's data type in the coordinator if needed. + * Alter the sequence's data type in the current node if needed. * * First, we should only change the sequence type if the column * is a supported sequence type. For example, if a sequence is used @@ -2071,7 +2176,7 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn, EnsureLocalTableEmptyIfNecessary(relationId, distributionMethod); /* user really wants triggers? */ - if (EnableUnsafeTriggers) + if (EnableUnsafeTriggers || IsTenantSchema(get_rel_namespace(relationId))) { ErrorIfRelationHasUnsupportedTrigger(relationId); } @@ -2657,7 +2762,7 @@ CopyLocalDataIntoShards(Oid distributedTableId) * opens a connection and starts a COPY for each shard placement that will have * data.
*/ -uint64 +static uint64 CopyFromLocalTableIntoDistTable(Oid localTableId, Oid distributedTableId) { /* take an ExclusiveLock to block all operations except SELECT */ @@ -2689,7 +2794,7 @@ CopyFromLocalTableIntoDistTable(Oid localTableId, Oid distributedTableId) /* get the table columns for distributed table */ TupleDesc destTupleDescriptor = RelationGetDescr(distributedRelation); - List *columnNameList = TupleDescColumnNameList(destTupleDescriptor); + List *columnNameList = CopyablePlainColumnNameListFromTupleDesc(destTupleDescriptor); RelationClose(distributedRelation); @@ -2800,11 +2905,11 @@ DoCopyFromLocalTableIntoShards(Relation localRelation, /* - * TupleDescColumnNameList returns a list of column names for the given tuple - * descriptor as plain strings. + * CopyablePlainColumnNameListFromTupleDesc returns the list of copyable column + * names for the given tuple descriptor as plain strings. */ -static List * -TupleDescColumnNameList(TupleDesc tupleDescriptor) +List * +CopyablePlainColumnNameListFromTupleDesc(TupleDesc tupleDescriptor) { List *columnNameList = NIL; diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index e1e77a7eb62..40f3df32ad8 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -189,7 +189,16 @@ EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target, * This guarantees that all active nodes will have the object, because they will * either get it now, or get it in citus_add_node after this transaction finishes and * the pg_dist_object record becomes visible. + * + * If we're on a worker, first acquire the lock on the coordinator via the remote + * metadata connection to the coordinator as superuser. Fwiw, we'll acquire the + * lock on the local node as well via ActivePrimaryRemoteNodeList(). 
*/ + if (!IsCoordinator()) + { + LockPgDistNodeOnCoordinatorViaSuperUser(RowShareLock); + } + List *remoteNodeList = ActivePrimaryRemoteNodeList(RowShareLock); /* diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 5acecf099b5..e0656a13eaa 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -32,8 +32,8 @@ static DistributeObjectOps NoDistributeOps = { static DistributeObjectOps Aggregate_AlterObjectSchema = { .deparse = DeparseAlterFunctionSchemaStmt, .qualify = QualifyAlterFunctionSchemaStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, .address = AlterFunctionSchemaStmtObjectAddress, @@ -42,8 +42,8 @@ static DistributeObjectOps Aggregate_AlterObjectSchema = { static DistributeObjectOps Aggregate_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, .address = AlterFunctionOwnerObjectAddress, @@ -71,7 +71,7 @@ static DistributeObjectOps Aggregate_Drop = { static DistributeObjectOps Aggregate_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, @@ -90,7 +90,7 @@ static DistributeObjectOps Aggregate_Grant = { static DistributeObjectOps Any_AlterEnum = { .deparse = DeparseAlterEnumStmt, .qualify = QualifyAlterEnumStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_TYPE, .operationType = DIST_OPS_ALTER, @@ -118,7 +118,7 @@ static DistributeObjectOps Any_AlterExtensionContents = { static DistributeObjectOps Any_AlterForeignServer = { .deparse = DeparseAlterForeignServerStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_FOREIGN_SERVER, .operationType = DIST_OPS_ALTER, @@ -309,7 +309,7 @@ static DistributeObjectOps Any_DropRole = { static DistributeObjectOps Role_Comment = { .deparse = DeparseCommentStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_DATABASE, .operationType = DIST_OPS_ALTER, @@ -446,8 +446,8 @@ static DistributeObjectOps Attribute_Rename = { static DistributeObjectOps Collation_AlterObjectSchema = { .deparse = DeparseAlterCollationSchemaStmt, .qualify = QualifyAlterCollationSchemaStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess 
= PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_COLLATION, .operationType = DIST_OPS_ALTER, .address = AlterCollationSchemaStmtObjectAddress, @@ -456,8 +456,8 @@ static DistributeObjectOps Collation_AlterObjectSchema = { static DistributeObjectOps Collation_AlterOwner = { .deparse = DeparseAlterCollationOwnerStmt, .qualify = QualifyAlterCollationOwnerStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_COLLATION, .operationType = DIST_OPS_ALTER, .address = AlterCollationOwnerObjectAddress, @@ -485,7 +485,7 @@ static DistributeObjectOps Collation_Drop = { static DistributeObjectOps Collation_Rename = { .deparse = DeparseRenameCollationStmt, .qualify = QualifyRenameCollationStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_COLLATION, .operationType = DIST_OPS_ALTER, @@ -495,8 +495,8 @@ static DistributeObjectOps Collation_Rename = { static DistributeObjectOps Database_AlterOwner = { .deparse = DeparseAlterDatabaseOwnerStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_DATABASE, .operationType = DIST_OPS_ALTER, .featureFlag = &EnableAlterDatabaseOwner, @@ -573,7 +573,7 @@ static DistributeObjectOps Database_Set = { static DistributeObjectOps Database_Comment = { .deparse = DeparseCommentStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_DATABASE, .operationType = DIST_OPS_ALTER, @@ -595,8 +595,8 @@ static DistributeObjectOps Database_Rename = { static DistributeObjectOps Domain_Alter = { .deparse = DeparseAlterDomainStmt, .qualify = QualifyAlterDomainStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_DOMAIN, .operationType = DIST_OPS_ALTER, .address = AlterDomainStmtObjectAddress, @@ -605,8 +605,8 @@ static DistributeObjectOps Domain_Alter = { static DistributeObjectOps Domain_AlterObjectSchema = { .deparse = DeparseAlterDomainSchemaStmt, .qualify = QualifyAlterDomainSchemaStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_DOMAIN, .operationType = DIST_OPS_ALTER, .address = AlterTypeSchemaStmtObjectAddress, @@ -615,8 +615,8 @@ static DistributeObjectOps Domain_AlterObjectSchema = { static DistributeObjectOps Domain_AlterOwner = { .deparse = DeparseAlterDomainOwnerStmt, .qualify = QualifyAlterDomainOwnerStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = 
PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_DOMAIN, .operationType = DIST_OPS_ALTER, .address = AlterDomainOwnerStmtObjectAddress, @@ -634,7 +634,7 @@ static DistributeObjectOps Domain_Drop = { static DistributeObjectOps Domain_Rename = { .deparse = DeparseRenameDomainStmt, .qualify = QualifyRenameDomainStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_DOMAIN, .operationType = DIST_OPS_ALTER, @@ -645,7 +645,7 @@ static DistributeObjectOps Domain_Rename = { static DistributeObjectOps Domain_RenameConstraint = { .deparse = DeparseDomainRenameConstraintStmt, .qualify = QualifyDomainRenameConstraintStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_DOMAIN, .operationType = DIST_OPS_ALTER, @@ -700,7 +700,7 @@ static DistributeObjectOps ForeignServer_Grant = { static DistributeObjectOps ForeignServer_Rename = { .deparse = DeparseAlterForeignServerRenameStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_FOREIGN_SERVER, .operationType = DIST_OPS_ALTER, @@ -710,8 +710,8 @@ static DistributeObjectOps ForeignServer_Rename = { static DistributeObjectOps ForeignServer_AlterOwner = { .deparse = DeparseAlterForeignServerOwnerStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_FOREIGN_SERVER, .operationType = DIST_OPS_ALTER, .address = AlterForeignServerOwnerStmtObjectAddress, @@ -738,8 +738,8 @@ static DistributeObjectOps Function_AlterObjectDepends = { static DistributeObjectOps Function_AlterObjectSchema = { .deparse = DeparseAlterFunctionSchemaStmt, .qualify = QualifyAlterFunctionSchemaStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, .address = AlterFunctionSchemaStmtObjectAddress, @@ -748,8 +748,8 @@ static DistributeObjectOps Function_AlterObjectSchema = { static DistributeObjectOps Function_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, .address = AlterFunctionOwnerObjectAddress, @@ -785,7 +785,7 @@ static DistributeObjectOps View_Drop = { static DistributeObjectOps Function_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, @@ -831,8 +831,8 @@ static DistributeObjectOps 
Procedure_AlterObjectDepends = { static DistributeObjectOps Procedure_AlterObjectSchema = { .deparse = DeparseAlterFunctionSchemaStmt, .qualify = QualifyAlterFunctionSchemaStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, .address = AlterFunctionSchemaStmtObjectAddress, @@ -841,8 +841,8 @@ static DistributeObjectOps Procedure_AlterObjectSchema = { static DistributeObjectOps Procedure_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, .address = AlterFunctionOwnerObjectAddress, @@ -869,7 +869,7 @@ static DistributeObjectOps Procedure_Grant = { static DistributeObjectOps Procedure_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, @@ -880,7 +880,7 @@ static DistributeObjectOps Publication_Alter = { .deparse = DeparseAlterPublicationStmt, .qualify = QualifyAlterPublicationStmt, .preprocess = PreprocessAlterPublicationStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_PUBLICATION, .operationType = DIST_OPS_ALTER, .address = AlterPublicationStmtObjectAddress, @@ -889,8 +889,8 @@ static DistributeObjectOps Publication_Alter = { static DistributeObjectOps Publication_AlterOwner = { .deparse = DeparseAlterPublicationOwnerStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_PUBLICATION, .operationType = DIST_OPS_ALTER, .address = AlterPublicationOwnerStmtObjectAddress, @@ -908,7 +908,7 @@ static DistributeObjectOps Publication_Drop = { static DistributeObjectOps Publication_Rename = { .deparse = DeparseRenamePublicationStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_PUBLICATION, .operationType = DIST_OPS_ALTER, @@ -990,7 +990,7 @@ static DistributeObjectOps Sequence_Rename = { static DistributeObjectOps TextSearchConfig_Alter = { .deparse = DeparseAlterTextSearchConfigurationStmt, .qualify = QualifyAlterTextSearchConfigurationStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_TSCONFIGURATION, .operationType = DIST_OPS_ALTER, @@ -1000,8 +1000,8 @@ static DistributeObjectOps TextSearchConfig_Alter = { static DistributeObjectOps TextSearchConfig_AlterObjectSchema = { .deparse = DeparseAlterTextSearchConfigurationSchemaStmt, .qualify = 
QualifyAlterTextSearchConfigurationSchemaStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_TSCONFIGURATION, .operationType = DIST_OPS_ALTER, .address = AlterTextSearchConfigurationSchemaStmtObjectAddress, @@ -1010,8 +1010,8 @@ static DistributeObjectOps TextSearchConfig_AlterObjectSchema = { static DistributeObjectOps TextSearchConfig_AlterOwner = { .deparse = DeparseAlterTextSearchConfigurationOwnerStmt, .qualify = QualifyAlterTextSearchConfigurationOwnerStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_TSCONFIGURATION, .operationType = DIST_OPS_ALTER, .address = AlterTextSearchConfigurationOwnerObjectAddress, @@ -1025,7 +1025,7 @@ static DistributeObjectOps TextSearchConfig_Comment = { * and adress function */ .qualify = QualifyTextSearchConfigurationCommentStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_TSCONFIGURATION, .operationType = DIST_OPS_ALTER, @@ -1054,7 +1054,7 @@ static DistributeObjectOps TextSearchConfig_Drop = { static DistributeObjectOps TextSearchConfig_Rename = { .deparse = DeparseRenameTextSearchConfigurationStmt, .qualify = QualifyRenameTextSearchConfigurationStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_TSCONFIGURATION, .operationType = DIST_OPS_ALTER, @@ -1064,7 +1064,7 @@ static DistributeObjectOps TextSearchConfig_Rename = { static DistributeObjectOps TextSearchDict_Alter = { .deparse = DeparseAlterTextSearchDictionaryStmt, .qualify = QualifyAlterTextSearchDictionaryStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_TSDICTIONARY, .operationType = DIST_OPS_ALTER, @@ -1074,8 +1074,8 @@ static DistributeObjectOps TextSearchDict_Alter = { static DistributeObjectOps TextSearchDict_AlterObjectSchema = { .deparse = DeparseAlterTextSearchDictionarySchemaStmt, .qualify = QualifyAlterTextSearchDictionarySchemaStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_TSDICTIONARY, .operationType = DIST_OPS_ALTER, .address = AlterTextSearchDictionarySchemaStmtObjectAddress, @@ -1084,8 +1084,8 @@ static DistributeObjectOps TextSearchDict_AlterObjectSchema = { static DistributeObjectOps TextSearchDict_AlterOwner = { .deparse = DeparseAlterTextSearchDictionaryOwnerStmt, .qualify = QualifyAlterTextSearchDictionaryOwnerStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_TSDICTIONARY, .operationType = DIST_OPS_ALTER, .address = 
AlterTextSearchDictOwnerObjectAddress, @@ -1094,7 +1094,7 @@ static DistributeObjectOps TextSearchDict_AlterOwner = { static DistributeObjectOps TextSearchDict_Comment = { .deparse = DeparseCommentStmt, .qualify = QualifyTextSearchDictionaryCommentStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_TSDICTIONARY, .operationType = DIST_OPS_ALTER, @@ -1123,7 +1123,7 @@ static DistributeObjectOps TextSearchDict_Drop = { static DistributeObjectOps TextSearchDict_Rename = { .deparse = DeparseRenameTextSearchDictionaryStmt, .qualify = QualifyRenameTextSearchDictionaryStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_TSDICTIONARY, .operationType = DIST_OPS_ALTER, @@ -1142,8 +1142,8 @@ static DistributeObjectOps Trigger_AlterObjectDepends = { static DistributeObjectOps Routine_AlterObjectSchema = { .deparse = DeparseAlterFunctionSchemaStmt, .qualify = QualifyAlterFunctionSchemaStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, .address = AlterFunctionSchemaStmtObjectAddress, @@ -1152,8 +1152,8 @@ static DistributeObjectOps Routine_AlterObjectSchema = { static DistributeObjectOps Routine_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, .address = AlterFunctionOwnerObjectAddress, @@ -1180,7 +1180,7 @@ static DistributeObjectOps Routine_Grant = { static DistributeObjectOps Routine_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_FUNCTION, .operationType = DIST_OPS_ALTER, @@ -1190,7 +1190,7 @@ static DistributeObjectOps Routine_Rename = { static DistributeObjectOps Schema_AlterOwner = { .deparse = DeparseAlterSchemaOwnerStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromAnyNode, .operationType = DIST_OPS_ALTER, .postprocess = NULL, .address = AlterSchemaOwnerStmtObjectAddress, @@ -1217,7 +1217,7 @@ static DistributeObjectOps Schema_Grant = { static DistributeObjectOps Schema_Rename = { .deparse = DeparseAlterSchemaRenameStmt, .qualify = NULL, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromAnyNode, .postprocess = NULL, .objectType = OBJECT_SCHEMA, .operationType = DIST_OPS_ALTER, @@ -1299,8 +1299,8 @@ static DistributeObjectOps Table_Drop = { static DistributeObjectOps Type_AlterObjectSchema = { .deparse = DeparseAlterTypeSchemaStmt, .qualify = QualifyAlterTypeSchemaStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = 
PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_TYPE, .operationType = DIST_OPS_ALTER, .address = AlterTypeSchemaStmtObjectAddress, @@ -1326,8 +1326,8 @@ static DistributeObjectOps View_AlterObjectSchema = { static DistributeObjectOps Type_AlterOwner = { .deparse = DeparseAlterTypeOwnerStmt, .qualify = QualifyAlterTypeOwnerStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, - .postprocess = PostprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, + .postprocess = PostprocessAlterDistributedObjectStmtFromCoordinator, .objectType = OBJECT_TYPE, .operationType = DIST_OPS_ALTER, .address = AlterTypeOwnerObjectAddress, @@ -1336,7 +1336,7 @@ static DistributeObjectOps Type_AlterOwner = { static DistributeObjectOps Type_AlterTable = { .deparse = DeparseAlterTypeStmt, .qualify = QualifyAlterTypeStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_TYPE, .operationType = DIST_OPS_ALTER, @@ -1381,7 +1381,7 @@ static DistributeObjectOps Trigger_Drop = { static DistributeObjectOps Type_Rename = { .deparse = DeparseRenameTypeStmt, .qualify = QualifyRenameTypeStmt, - .preprocess = PreprocessAlterDistributedObjectStmt, + .preprocess = PreprocessAlterDistributedObjectStmtFromCoordinator, .postprocess = NULL, .objectType = OBJECT_TYPE, .operationType = DIST_OPS_ALTER, diff --git a/src/backend/distributed/commands/drop_distributed_table.c b/src/backend/distributed/commands/drop_distributed_table.c index c3d488b09bf..2ff12de6e40 100644 --- a/src/backend/distributed/commands/drop_distributed_table.c +++ b/src/backend/distributed/commands/drop_distributed_table.c @@ -27,9 +27,9 @@ /* local function forward declarations */ -static void MasterRemoveDistributedTableMetadataFromWorkers(Oid relationId, - char *schemaName, - char *tableName); +static void MasterRemoveDistributedTableMetadataFromRemoteNodes(Oid relationId, + char *schemaName, + char *tableName); /* exports for SQL callable functions */ @@ -86,7 +86,11 @@ master_remove_partition_metadata(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } - EnsureCoordinator(); + /* + * Today we support DROP from workers only if the table is a + * distributed-schema table, but it's okay to not ensure this here. + */ + EnsurePropagationToCoordinator(); CheckTableSchemaNameForDrop(relationId, &schemaName, &tableName); @@ -137,14 +141,15 @@ master_remove_distributed_table_metadata_from_workers(PG_FUNCTION_ARGS) CheckTableSchemaNameForDrop(relationId, &schemaName, &tableName); - MasterRemoveDistributedTableMetadataFromWorkers(relationId, schemaName, tableName); + MasterRemoveDistributedTableMetadataFromRemoteNodes(relationId, schemaName, + tableName); PG_RETURN_VOID(); } /* - * MasterRemoveDistributedTableMetadataFromWorkers drops the table and removes + * MasterRemoveDistributedTableMetadataFromRemoteNodes drops the table and removes * all the metadata belonging the distributed table in the worker nodes * with metadata. The function doesn't drop the tables that are * the shards on the workers. @@ -158,8 +163,8 @@ master_remove_distributed_table_metadata_from_workers(PG_FUNCTION_ARGS) * which CALLED_AS_TRIGGER does not cover. 
*/ static void -MasterRemoveDistributedTableMetadataFromWorkers(Oid relationId, char *schemaName, - char *tableName) +MasterRemoveDistributedTableMetadataFromRemoteNodes(Oid relationId, char *schemaName, + char *tableName) { /* * The SQL_DROP trigger calls this function even for tables that are @@ -172,7 +177,11 @@ MasterRemoveDistributedTableMetadataFromWorkers(Oid relationId, char *schemaName return; } - EnsureCoordinator(); + /* + * Today we support DROP from workers only if the table is a + * distributed-schema table, but it's okay to not ensure this here. + */ + EnsurePropagationToCoordinator(); if (!ShouldSyncTableMetadataViaCatalog(relationId)) { @@ -182,7 +191,7 @@ MasterRemoveDistributedTableMetadataFromWorkers(Oid relationId, char *schemaName if (PartitionTable(relationId)) { /* - * MasterRemoveDistributedTableMetadataFromWorkers is only called from drop trigger. + * MasterRemoveDistributedTableMetadataFromRemoteNodes is only called from drop trigger. * When parent is dropped in a drop trigger, we remove all the corresponding * partitions via the parent, mostly for performance reasons. */ @@ -191,7 +200,7 @@ MasterRemoveDistributedTableMetadataFromWorkers(Oid relationId, char *schemaName /* drop the distributed table metadata on the workers */ char *deleteDistributionCommand = DistributionDeleteCommand(schemaName, tableName); - SendCommandToWorkersWithMetadata(deleteDistributionCommand); + SendCommandToRemoteNodesWithMetadata(deleteDistributionCommand); } diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 0538f51b3b7..e75623d54f2 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -34,6 +34,7 @@ #include "pg_version_constants.h" +#include "distributed/backend_data.h" #include "distributed/citus_ruleutils.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" @@ -51,6 +52,7 @@ #include "distributed/relation_access_tracking.h" #include "distributed/relation_utils.h" #include "distributed/resource_lock.h" +#include "distributed/tenant_schema_metadata.h" #include "distributed/version_compat.h" #include "distributed/worker_manager.h" @@ -127,7 +129,7 @@ IsIndexRenameStmt(RenameStmt *renameStmt) * PreprocessIndexStmt determines whether a given CREATE INDEX statement involves * a distributed table. If so (and if the statement does not use unsupported * options), it modifies the input statement to ensure proper execution against - * the coordinator node table and creates a DDLJob to encapsulate information needed + * the distributed table and creates a DDLJob to encapsulate information needed * during the worker node portion of DDL execution before returning that DDLJob * in a List. If no distributed table is involved, this function returns NIL. */ @@ -181,7 +183,7 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand, return NIL; } - EnsureCoordinator(); + EnsureCoordinatorUnlessTenantSchema(relationId); if (createIndexStatement->idxname == NULL) { @@ -576,7 +578,7 @@ ReindexStmtFindRelationOid(ReindexStmt *reindexStmt, bool missingOk) * PreprocessReindexStmt determines whether a given REINDEX statement involves * a distributed table. 
If so (and if the statement does not use unsupported * options), it modifies the input statement to ensure proper execution against - * the coordinator node table and creates a DDLJob to encapsulate information needed + * the distributed table and creates a DDLJob to encapsulate information needed * during the worker node portion of DDL execution before returning that DDLJob * in a List. If no distributed table is involved, this function returns NIL. */ @@ -688,7 +690,7 @@ ReindexStmtObjectAddress(Node *stmt, bool missing_ok, bool isPostprocess) * PreprocessDropIndexStmt determines whether a given DROP INDEX statement involves * a distributed table. If so (and if the statement does not use unsupported * options), it modifies the input statement to ensure proper execution against - * the coordinator node table and creates a DDLJob to encapsulate information needed + * the distributed table and creates a DDLJob to encapsulate information needed * during the worker node portion of DDL execution before returning that DDLJob * in a List. If no distributed table is involved, this function returns NIL. */ @@ -799,12 +801,6 @@ PostprocessIndexStmt(Node *node, const char *queryString) { IndexStmt *indexStmt = castNode(IndexStmt, node); - /* this logic only applies to the coordinator */ - if (!IsCoordinator()) - { - return NIL; - } - /* * We make sure schema name is not null in the PreprocessIndexStmt */ @@ -1351,7 +1347,7 @@ void MarkIndexValid(IndexStmt *indexStmt) { Assert(indexStmt->concurrent); - Assert(IsCoordinator()); + Assert(!IsCitusInternalBackend()); /* * We make sure schema name is not null in the PreprocessIndexStmt diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 33d682d6767..b4130c54552 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -3039,9 +3039,7 @@ CitusCopySelect(CopyStmt *copyStatement) { Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, i); - if (attr->attisdropped || - attr->attgenerated - ) + if (IsDroppedOrGenerated(attr)) { continue; } diff --git a/src/backend/distributed/commands/publication.c b/src/backend/distributed/commands/publication.c index 7c1db06d659..08e726ae421 100644 --- a/src/backend/distributed/commands/publication.c +++ b/src/backend/distributed/commands/publication.c @@ -357,8 +357,8 @@ BuildPublicationRelationObjSpec(Oid relationId, Oid publicationId, /* - * PreprocessAlterPublicationStmt handles ALTER PUBLICATION statements - * in a way that is mostly similar to PreprocessAlterDistributedObjectStmt, + * PreprocessAlterPublicationStmt handles ALTER PUBLICATION statements in a way + * that is mostly similar to PreprocessAlterDistributedObjectStmtFromCoordinator, * except we do not ensure sequential mode (publications do not interact with * shards) and can handle NULL deparse commands for ALTER PUBLICATION commands * that only involve local tables. diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 173bcd48ea8..091e34b7a94 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -1015,6 +1015,16 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString, EnsureSequentialModeForRoleDDL(); + /* + * If we're on a worker, first acquire the lock on the coordinator via + * the remote metadata connection to the coordinator as superuser. Fwiw, + * we'll acquire the lock on the local node as well. 
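+ * This is the same pg_dist_node locking pattern that this patch adds to
+ * EnsureRequiredObjectSetExistOnAllNodes() in dependencies.c.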
+ */ + if (!IsCoordinator()) + { + LockPgDistNodeOnCoordinatorViaSuperUser(RowShareLock); + } + LockRelationOid(DistNodeRelationId(), RowShareLock); CreateRoleStmt *createRoleStmt = castNode(CreateRoleStmt, node); diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index b079fe3f674..3ca166c6da0 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -64,7 +64,7 @@ PostprocessCreateSchemaStmt(Node *node, const char *queryString) return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); EnsureSequentialMode(OBJECT_SCHEMA); @@ -130,7 +130,7 @@ PostprocessCreateSchemaStmt(Node *node, const char *queryString) commands = lappend(commands, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -157,7 +157,7 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); EnsureSequentialMode(OBJECT_SCHEMA); @@ -190,7 +190,7 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString, (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } diff --git a/src/backend/distributed/commands/schema_based_sharding.c b/src/backend/distributed/commands/schema_based_sharding.c index 6635d6817a5..175716b48d4 100644 --- a/src/backend/distributed/commands/schema_based_sharding.c +++ b/src/backend/distributed/commands/schema_based_sharding.c @@ -265,23 +265,7 @@ EnsureFKeysForTenantTable(Oid relationId) void CreateTenantSchemaTable(Oid relationId) { - if (!IsCoordinator()) - { - /* - * We don't support creating tenant tables from workers. We could - * let ShouldCreateTenantSchemaTable() to return false to allow users - * to create a local table as usual but that would be confusing because - * it might sound like we allow creating tenant tables from workers. - * For this reason, we prefer to throw an error instead. - * - * Indeed, CreateSingleShardTable() would already do so but we - * prefer to throw an error with a more meaningful message, rather - * than saying "operation is not allowed on this node". - */ - ereport(ERROR, (errmsg("cannot create tables in a distributed schema from " - "a worker node"), - errhint("Connect to the coordinator node and try again."))); - } + EnsurePropagationToCoordinator(); EnsureTableKindSupportedForTenantSchema(relationId); @@ -301,7 +285,8 @@ CreateTenantSchemaTable(Oid relationId) .colocationParamType = COLOCATE_WITH_COLOCATION_ID, .colocationId = colocationId, }; - CreateSingleShardTable(relationId, colocationParam); + bool allowFromWorkers = true; + CreateSingleShardTable(relationId, colocationParam, allowFromWorkers); } @@ -553,7 +538,7 @@ UnregisterTenantSchemaGlobally(Oid schemaId, char *schemaName) DeleteTenantSchemaLocally(schemaId); if (EnableMetadataSync) { - SendCommandToWorkersWithMetadata(TenantSchemaDeleteCommand(schemaName)); + SendCommandToRemoteNodesWithMetadata(TenantSchemaDeleteCommand(schemaName)); } DeleteColocationGroup(tenantSchemaColocationId); @@ -579,10 +564,22 @@ citus_internal_unregister_tenant_schema_globally(PG_FUNCTION_ARGS) char *schemaNameStr = text_to_cstring(schemaName); /* - * Skip on workers because we expect this to be called from the coordinator - * only via drop hook. 
+ * Have this check to make sure we execute this only on the backend executing + * the distributed "DROP SCHEMA" command -not on internal backends propagating + * the DDL to remote nodes- to prevent other nodes from trying to unregister + * the same tenant schema globally, since the backend executing the distributed + * "DROP SCHEMA" command already does so globally via this function. + * + * Actually, even if we didn't have this check, the other nodes would still be + * prevented from trying to unregister the same tenant schema globally. This + * is because, when dropping a distributed schema, we first delete the tenant + * schema from metadata globally and then we drop the schema itself on other + * nodes. So, when the drop hook is called on other nodes, it would not try to + * unregister the tenant schema globally since the schema would not be found + * in the tenant schema metadata. However, having this check makes it more + * explicit and guards us against future changes. */ - if (!IsCoordinator()) + if (IsCitusInternalBackend() || IsRebalancerInternalBackend()) { PG_RETURN_VOID(); } @@ -684,7 +681,9 @@ citus_schema_distribute(PG_FUNCTION_ARGS) originalForeignKeyRecreationCommands, fkeyCommandsForRelation); DropFKeysRelationInvolvedWithTableType(relationId, INCLUDE_ALL_TABLE_TYPES); - CreateSingleShardTable(relationId, colocationParam); + bool allowFromWorkers = false; + CreateSingleShardTable(relationId, colocationParam, + allowFromWorkers); } /* We can skip foreign key validations as we are sure about them at start */ diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index c482c25fa46..5ada87b7c84 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -54,6 +54,7 @@ #include "distributed/resource_lock.h" #include "distributed/tenant_schema_metadata.h" #include "distributed/version_compat.h" +#include "distributed/worker_protocol.h" #include "distributed/worker_shard_visibility.h" @@ -124,7 +125,7 @@ static Oid get_attrdef_oid(Oid relationId, AttrNumber attnum); static char * GetAddColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId, char *colname, TypeName *typeName, - bool ifNotExists); + bool ifNotExists, bool forceUseNextVal); static void ErrorIfAlterTableDropTableNameFromPostgresFdw(List *optionList, Oid relationId); @@ -136,7 +137,7 @@ static void ErrorIfAlterTableDropTableNameFromPostgresFdw(List *optionList, Oid static bool SetupExecutionModeForAlterTable(Oid relationId, AlterTableCmd *command); /* - * PreprocessDropTableStmt processes DROP TABLE commands for partitioned tables. + * PreprocessDropTableStmt processes DROP TABLE commands for Citus tables. * If we are trying to DROP partitioned tables, we first need to go to MX nodes * and DETACH partitions from their parents. Otherwise, we process DROP command * multiple times in MX workers. For shards, we send DROP commands with IF EXISTS @@ -169,6 +170,13 @@ PreprocessDropTableStmt(Node *node, const char *queryString, continue; } + /* + * For Citus tables except tenant schema tables, we don't allow + * dropping from the workers. For tenant schema tables, we allow dropping + * from the workers only if the coordinator is in the metadata. + */ + EnsureCoordinatorUnlessTenantSchema(relationId); + /* * While changing the tables that are part of a colocation group we need to * prevent concurrent mutations to the placements of the shard groups.
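The EnsureCoordinatorUnlessTenantSchema() call introduced above is defined elsewhere in this patch; judging from the comment, its behavior can be sketched roughly as below. This is an illustration only, built from checks that appear in this patch (IsTenantSchema(), EnsurePropagationToCoordinator(), EnsureCoordinator()), not the patch's actual definition:

```c
/*
 * Illustrative sketch: tables in a distributed (tenant) schema may be
 * dropped from a worker as long as DDL can still be propagated to the
 * coordinator; all other Citus tables require running on the coordinator.
 */
static void
EnsureCoordinatorUnlessTenantSchemaSketch(Oid relationId)
{
	if (IsTenantSchema(get_rel_namespace(relationId)))
	{
		EnsurePropagationToCoordinator();
	}
	else
	{
		EnsureCoordinator();
	}
}
```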
@@ -176,7 +184,20 @@ PreprocessDropTableStmt(Node *node, const char *queryString, CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId); if (cacheEntry->colocationId != INVALID_COLOCATION_ID) { - LockColocationId(cacheEntry->colocationId, ShareLock); + /* + * If we're on a worker, we acquire the lock on the coordinator via the + * remote metadata connection to the coordinator. + */ + if (IsCoordinator()) + { + LockColocationId(cacheEntry->colocationId, ShareLock); + } + else + { + char *command = LockColocationIdCommand(cacheEntry->colocationId, + ShareLock); + SendCommandToCoordinator(command); + } } /* invalidate foreign key cache if the table involved in any foreign key */ @@ -185,21 +206,22 @@ PreprocessDropTableStmt(Node *node, const char *queryString, MarkInvalidateForeignKeyGraph(); } - /* we're only interested in partitioned and mx tables */ + /* + * From this point on, we're only interested in partitioned Citus + * tables & only if MX is enabled. + */ if (!ShouldSyncTableMetadata(relationId) || !PartitionedTable(relationId)) { continue; } - EnsureCoordinator(); - List *partitionList = PartitionList(relationId); if (list_length(partitionList) == 0) { continue; } - SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(DISABLE_DDL_PROPAGATION); Oid partitionRelationId = InvalidOid; foreach_declared_oid(partitionRelationId, partitionList) @@ -207,10 +229,10 @@ PreprocessDropTableStmt(Node *node, const char *queryString, char *detachPartitionCommand = GenerateDetachPartitionCommand(partitionRelationId); - SendCommandToWorkersWithMetadata(detachPartitionCommand); + SendCommandToRemoteNodesWithMetadata(detachPartitionCommand); } - SendCommandToWorkersWithMetadata(ENABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(ENABLE_DDL_PROPAGATION); } return NIL; @@ -666,7 +688,9 @@ DistributePartitionUsingParent(Oid parentCitusRelationId, Oid partitionRelationI .colocationParamType = COLOCATE_WITH_TABLE_LIKE_OPT, .colocateWithTableName = parentRelationName, }; - CreateSingleShardTable(partitionRelationId, colocationParam); + bool allowFromWorkers = false; + CreateSingleShardTable(partitionRelationId, colocationParam, + allowFromWorkers); return; } @@ -1260,7 +1284,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, ErrorIfUnsupportedAlterTableStmt(alterTableStatement); } - EnsureCoordinator(); + EnsureCoordinatorUnlessTenantSchema(leftRelationId); /* these will be set in below loop according to subcommands */ Oid rightRelationId = InvalidOid; @@ -1289,10 +1313,10 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, * alterTableStmt */ bool deparseAT = false; - bool propagateCommandToWorkers = true; + bool propagateCommandToRemoteNodes = true; /* - * Sometimes we want to run a different DDL Command string in MX workers + * Sometimes we want to run a different DDL Command string on remote MX workers * For example, in cases where worker_nextval should be used instead * of nextval() in column defaults with type int and smallint */ @@ -1546,7 +1570,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, if (contain_nextval_expression_walker(expr, NULL)) { - propagateCommandToWorkers = false; + propagateCommandToRemoteNodes = false; useInitialDDLCommandString = false; } } @@ -1642,7 +1666,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, if (OidIsValid(rightRelationId)) { bool referencedIsLocalTable = !IsCitusTable(rightRelationId); - if 
(referencedIsLocalTable || !propagateCommandToWorkers) + if (referencedIsLocalTable || !propagateCommandToRemoteNodes) { ddlJob->taskList = NIL; } @@ -1657,7 +1681,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, { /* ... otherwise use standard DDL task list function */ ddlJob->taskList = DDLTaskList(leftRelationId, alterTableCommand); - if (!propagateCommandToWorkers) + if (!propagateCommandToRemoteNodes) { ddlJob->taskList = NIL; } @@ -2401,11 +2425,17 @@ PreprocessAlterTableSchemaStmt(Node *node, const char *queryString, } /* Undistribute table if its old schema is a tenant schema */ - if (IsTenantSchema(oldSchemaId) && IsCoordinator()) + if (IsTenantSchema(oldSchemaId)) { EnsureUndistributeTenantTableSafe(relationId, TenantOperationNames[TENANT_SET_SCHEMA]); + if (!IsCoordinator()) + { + ereport(ERROR, (errmsg("moving distributed schema tables to another " + "schema from workers is not supported yet"))); + } + char *oldSchemaName = get_namespace_name(oldSchemaId); char *tableName = stmt->relation->relname; ereport(NOTICE, (errmsg("undistributing table %s in distributed schema %s " @@ -2620,10 +2650,47 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) EnsureAllObjectDependenciesExistOnAllNodes(list_make1(tableAddress)); } - /* for the new sequences coming with this ALTER TABLE statement */ - bool needMetadataSyncForNewSequences = false; - - char *alterTableDefaultNextvalCmd = NULL; + /* + * If this is an ALTER TABLE ADD COLUMN .. DEFAULT nextval('..') or + * an ALTER TABLE ALTER COLUMN .. SET DEFAULT nextval('..') command, + * we need to adjust the sequence ranges on all workers, and since + * EnsureAllObjectDependenciesExistOnAllNodes() already does so for + * all remote nodes when propagating the sequence, there is nothing + * that needs to be additionally done from this perspective when we + * are on the coordinator because in that case all remote nodes are + * workers anyway. + * + * For this reason, we also want to do the same for the local node + * if it's not the coordinator. So sequenceOidToAdjustRangesForLocalWorker + * is set only when we're on a worker so that we can adjust sequence + * ranges for the local worker as well. + * + * For such commands, we also need to adjust the nextval default command + * for the column that uses the sequence on all workers. + * defaultNextvalCmdForRemoteWorkers already does all the work for + * remote workers, i.e., alters the column or adds it in a way that + * uses the appropriate function call as the column default expression, + * so it's sufficient when we're on the coordinator. + * + * However, when we're on a worker, we need to take the same actions + * on the coordinator too, and this is where defaultNextvalCmdForCoordinator + * becomes useful: it alters the column or adds it in a way that uses + * the appropriate function call as the column default expression. The + * same holds for the local worker, but in that case the original DDL + * has already been executed on the local node, meaning that the column + * is either just added or is an existing column that's being altered + * now. So, in either case, we only need to alter the column default to + * use the appropriate function call, and this is where + * defaultNextvalCmdForLocalWorker becomes useful. + * + * Note that for any of these commands, we don't allow issuing them + * together with other subcommands, see ErrorIfUnsupportedAlterTableStmt().
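+ *
+ * As a sketch with hypothetical objects: if ALTER TABLE t1 ADD COLUMN c1 int
+ * DEFAULT nextval('s1') runs on a worker, defaultNextvalCmdForRemoteWorkers
+ * adds the column on the other workers with a worker_nextval('s1') default
+ * (c1 being int, not bigint), defaultNextvalCmdForCoordinator does the same
+ * on the coordinator with a plain nextval('s1') default, and
+ * defaultNextvalCmdForLocalWorker rewrites the locally added column's
+ * default to worker_nextval('s1').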
+ */ + char *defaultNextvalCmdForRemoteWorkers = NULL; + char *defaultNextvalCmdForCoordinator = NULL; + char *defaultNextvalCmdForLocalWorker = NULL; + Oid sequenceOidToAdjustRangesForLocalWorker = InvalidOid; List *commandList = alterTableStatement->cmds; AlterTableCmd *command = NULL; @@ -2706,8 +2773,11 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) { if (ShouldSyncTableMetadata(relationId)) { - needMetadataSyncForNewSequences = true; - alterTableDefaultNextvalCmd = + Assert(list_length(commandList) == 1 && + list_length(columnConstraints) == 1); + + bool forceUseNextVal = false; + defaultNextvalCmdForRemoteWorkers = GetAddColumnWithNextvalDefaultCmd(seqOid, relationId, columnDefinition @@ -2715,7 +2785,33 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) columnDefinition ->typeName, command-> - missing_ok); + missing_ok, + forceUseNextVal) + ; + + if (!IsCoordinator()) + { + forceUseNextVal = true; + defaultNextvalCmdForCoordinator = + GetAddColumnWithNextvalDefaultCmd(seqOid, + relationId, + columnDefinition + ->colname, + columnDefinition + ->typeName, + command-> + missing_ok, + forceUseNextVal); + + forceUseNextVal = false; + defaultNextvalCmdForLocalWorker = + GetAlterColumnWithNextvalDefaultCmd( + seqOid, relationId, columnDefinition-> + colname, + command->missing_ok, forceUseNextVal); + + sequenceOidToAdjustRangesForLocalWorker = seqOid; + } } } } @@ -2743,33 +2839,90 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) { if (ShouldSyncTableMetadata(relationId)) { - needMetadataSyncForNewSequences = true; + Assert(list_length(commandList) == 1); + bool missingTableOk = false; - alterTableDefaultNextvalCmd = GetAlterColumnWithNextvalDefaultCmd( - seqOid, relationId, command->name, missingTableOk); + bool forceUseNextVal = false; + defaultNextvalCmdForRemoteWorkers = + GetAlterColumnWithNextvalDefaultCmd( + seqOid, relationId, command->name, + missingTableOk, forceUseNextVal); + + if (!IsCoordinator()) + { + forceUseNextVal = true; + defaultNextvalCmdForCoordinator = + GetAlterColumnWithNextvalDefaultCmd( + seqOid, relationId, command->name, + missingTableOk, forceUseNextVal); + + forceUseNextVal = false; + defaultNextvalCmdForLocalWorker = + GetAlterColumnWithNextvalDefaultCmd( + seqOid, relationId, command->name, + missingTableOk, forceUseNextVal); + + sequenceOidToAdjustRangesForLocalWorker = seqOid; + } } } } } } - if (needMetadataSyncForNewSequences) + /* + * It's easy to retrieve the sequence id to create the proper commands + * in postprocess, after the dependency between the sequence and the table + * has been created. We already return ddlJobs in PreprocessAlterTableStmt, + * hence we can't return ddlJobs in PostprocessAlterTableStmt. + * That's why we execute defaultNextvalCmdForRemoteWorkers and + * defaultNextvalCmdForCoordinator here instead of in ExecuteDistributedDDLJob(). + */ + + if (defaultNextvalCmdForRemoteWorkers) { /* prevent recursive propagation */ - SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + SendCommandToRemoteWorkersWithMetadata(DISABLE_DDL_PROPAGATION); - /* - * It's easy to retrieve the sequence id to create the proper commands - * in postprocess, after the dependency between the sequence and the table - * has been created. We already return ddlJobs in PreprocessAlterTableStmt, - * hence we can't return ddlJobs in PostprocessAlterTableStmt. 
- * That's why we execute the following here instead of - * in ExecuteDistributedDDLJob - */ - SendCommandToWorkersWithMetadata(alterTableDefaultNextvalCmd); + SendCommandToRemoteWorkersWithMetadata(defaultNextvalCmdForRemoteWorkers); - SendCommandToWorkersWithMetadata(ENABLE_DDL_PROPAGATION); + SendCommandToRemoteWorkersWithMetadata(ENABLE_DDL_PROPAGATION); } + + if (defaultNextvalCmdForCoordinator) + { + /* prevent recursive propagation */ + SendCommandToCoordinator(DISABLE_DDL_PROPAGATION); + + SendCommandToCoordinator(defaultNextvalCmdForCoordinator); + + SendCommandToCoordinator(ENABLE_DDL_PROPAGATION); + } + + /* before executing commands for the local node, make sure to prevent recursive propagation */ + bool oldEnableDDLPropagation = EnableDDLPropagation; + SetLocalEnableDDLPropagation(false); + + if (OidIsValid(sequenceOidToAdjustRangesForLocalWorker)) + { + Oid sequenceSchemaOid = + get_rel_namespace(sequenceOidToAdjustRangesForLocalWorker); + char *sequenceSchemaName = get_namespace_name(sequenceSchemaOid); + char *sequenceName = get_rel_name(sequenceOidToAdjustRangesForLocalWorker); + Oid sequenceTypeId = pg_get_sequencedef(sequenceOidToAdjustRangesForLocalWorker)-> + seqtypid; + + AlterSequenceMinMax(sequenceOidToAdjustRangesForLocalWorker, sequenceSchemaName, + sequenceName, + sequenceTypeId); + } + + if (defaultNextvalCmdForLocalWorker) + { + ExecuteAndLogUtilityCommand(defaultNextvalCmdForLocalWorker); + } + + SetLocalEnableDDLPropagation(oldEnableDDLPropagation); } @@ -2933,17 +3086,22 @@ get_attrdef_oid(Oid relationId, AttrNumber attnum) /* * GetAlterColumnWithNextvalDefaultCmd returns a string representing: * ALTER TABLE ALTER COLUMN .. SET DEFAULT nextval() - * If sequence type is not bigint, we use worker_nextval() instead of nextval(). + * + * If the sequence type is not bigint, we use worker_nextval() instead of + * nextval(), unless forceUseNextVal is true, in which case we always use + * nextval() for the default expression. */ char * -GetAlterColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId, char *colname, bool - missingTableOk) +GetAlterColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId, char *colname, + bool missingTableOk, bool forceUseNextVal) { char *qualifiedSequenceName = generate_qualified_relation_name(sequenceOid); char *qualifiedRelationName = generate_qualified_relation_name(relationId); char *nextvalFunctionName = "nextval"; - bool useWorkerNextval = (pg_get_sequencedef(sequenceOid)->seqtypid != INT8OID); + bool useWorkerNextval = + !forceUseNextVal && (pg_get_sequencedef(sequenceOid)->seqtypid != INT8OID); if (useWorkerNextval) { /* @@ -2978,17 +3136,22 @@ GetAlterColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId, char *colna /* * GetAddColumnWithNextvalDefaultCmd returns a string representing: * ALTER TABLE ADD COLUMN .. DEFAULT nextval() - * If sequence type is not bigint, we use worker_nextval() instead of nextval(). + * + * If the sequence type is not bigint, we use worker_nextval() instead of + * nextval(), unless forceUseNextVal is true, in which case we always use + * nextval() for the default expression.
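+ *
+ * For example (hypothetical names), for an int column c1 backed by sequence
+ * s1, the generated command is roughly ALTER TABLE t1 ADD COLUMN c1 int
+ * DEFAULT worker_nextval('s1'::regclass), whereas with forceUseNextVal set
+ * it uses nextval('s1'::regclass) even though c1 is not bigint.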
*/ static char * GetAddColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId, char *colname, - TypeName *typeName, bool ifNotExists) + TypeName *typeName, bool ifNotExists, + bool forceUseNextVal) { char *qualifiedSequenceName = generate_qualified_relation_name(sequenceOid); char *qualifiedRelationName = generate_qualified_relation_name(relationId); char *nextvalFunctionName = "nextval"; - bool useWorkerNextval = (pg_get_sequencedef(sequenceOid)->seqtypid != INT8OID); + bool useWorkerNextval = + !forceUseNextVal && (pg_get_sequencedef(sequenceOid)->seqtypid != INT8OID); if (useWorkerNextval) { /* @@ -3335,7 +3498,13 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) { /* * We currently don't support adding a serial column for an MX table - * TODO: record the dependency in the workers + * Note: once this is allowed: + * i) Record the dependency on the remote nodes. + * ii) Similar to what we do at the end of CreateCitusTable() + * when creating a distributed table from a worker, adjust + * sequence ranges and nextval calls on the local node when + * executing on a worker. */ if (ShouldSyncTableMetadata(relationId)) { @@ -3386,6 +3555,10 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) { /* * We currently don't support adding an identity column for an MX table + * Note: Similar to what we do at the end of CreateCitusTable() when + * creating a distributed table from a worker, once this is allowed, + * we should adjust sequence ranges and nextval calls on the local + * node when executing on a worker. */ if (ShouldSyncTableMetadata(relationId)) { @@ -4334,11 +4507,6 @@ ConvertToTenantTableIfNecessary(AlterObjectSchemaStmt *stmt) { Assert(stmt->objectType == OBJECT_TABLE || stmt->objectType == OBJECT_FOREIGN_TABLE); - if (!IsCoordinator()) - { - return; - } - /* * We will let Postgres deal with missing_ok */ @@ -4349,16 +4517,24 @@ ConvertToTenantTableIfNecessary(AlterObjectSchemaStmt *stmt) /* We have already asserted that we have exactly 1 address in the addresses. */ ObjectAddress *tableAddress = linitial(tableAddresses); - char relKind = get_rel_relkind(tableAddress->objectId); + + Oid relationId = tableAddress->objectId; + if (!OidIsValid(relationId)) + { + Assert(stmt->missing_ok); + return; + } + + char relKind = get_rel_relkind(relationId); if (relKind == RELKIND_SEQUENCE || relKind == RELKIND_VIEW) { return; } - Oid relationId = tableAddress->objectId; Oid schemaId = get_namespace_oid(stmt->newschema, stmt->missing_ok); if (!OidIsValid(schemaId)) { + Assert(stmt->missing_ok); return; } @@ -4368,16 +4544,22 @@ ConvertToTenantTableIfNecessary(AlterObjectSchemaStmt *stmt) * that by seeing the table is still a single shard table. (i.e.
not undistributed * at `preprocess` step) */ - if (!IsCitusTableType(relationId, SINGLE_SHARD_DISTRIBUTED) && - IsTenantSchema(schemaId)) + if (IsCitusTableType(relationId, SINGLE_SHARD_DISTRIBUTED)) { - EnsureTenantTable(relationId, "ALTER TABLE SET SCHEMA"); - - char *schemaName = get_namespace_name(schemaId); - char *tableName = stmt->relation->relname; - ereport(NOTICE, (errmsg("Moving %s into distributed schema %s", - tableName, schemaName))); + return; + } - CreateTenantSchemaTable(relationId); + if (!ShouldCreateTenantSchemaTable(relationId)) + { + return; } + + EnsureTenantTable(relationId, "ALTER TABLE SET SCHEMA"); + + char *schemaName = get_namespace_name(schemaId); + char *tableName = stmt->relation->relname; + ereport(NOTICE, (errmsg("Moving %s into distributed schema %s", + tableName, schemaName))); + + CreateTenantSchemaTable(relationId); } diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c index 5b4d9358488..983b10b96f1 100644 --- a/src/backend/distributed/commands/trigger.c +++ b/src/backend/distributed/commands/trigger.c @@ -35,6 +35,7 @@ #include "distributed/metadata_sync.h" #include "distributed/namespace_utils.h" #include "distributed/shard_utils.h" +#include "distributed/tenant_schema_metadata.h" #include "distributed/worker_protocol.h" @@ -312,7 +313,8 @@ PostprocessCreateTriggerStmt(Node *node, const char *queryString) return NIL; } - EnsureCoordinator(); + EnsureCoordinatorUnlessTenantSchema(relationId); + ErrorOutForTriggerIfNotSupported(relationId); List *objectAddresses = GetObjectAddressListFromParseTree(node, missingOk, true); @@ -423,7 +425,8 @@ PostprocessAlterTriggerRenameStmt(Node *node, const char *queryString) return NIL; } - EnsureCoordinator(); + EnsureCoordinatorUnlessTenantSchema(relationId); + ErrorOutForTriggerIfNotSupported(relationId); /* use newname as standard process utility already renamed it */ @@ -550,7 +553,8 @@ PostprocessAlterTriggerDependsStmt(Node *node, const char *queryString) return NIL; } - EnsureCoordinator(); + EnsureCoordinatorUnlessTenantSchema(relationId); + ErrorOutForTriggerIfNotSupported(relationId); String *triggerNameValue = @@ -665,7 +669,8 @@ ErrorIfUnsupportedDropTriggerCommand(DropStmt *dropTriggerStmt) return; } - EnsureCoordinator(); + EnsureCoordinatorUnlessTenantSchema(relationId); + ErrorOutForTriggerIfNotSupported(relationId); } @@ -686,12 +691,13 @@ ErrorOutForTriggerIfNotSupported(Oid relationId) { ereport(ERROR, (errmsg("triggers are not supported on reference tables"))); } - else if (IsCitusTableType(relationId, DISTRIBUTED_TABLE)) + else if (IsCitusTableType(relationId, DISTRIBUTED_TABLE) && + !IsTenantSchema(get_rel_namespace(relationId))) { ereport(ERROR, (errmsg("triggers are not supported on distributed tables"))); } - /* we always support triggers on citus local tables */ + /* we always support triggers on citus local tables and distributed-schema tables */ } diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c index f71f779a515..94efd85cf8e 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -175,12 +175,13 @@ Datum truncate_local_data_after_distributing_table(PG_FUNCTION_ARGS) { CheckCitusVersion(ERROR); - EnsureCoordinator(); Oid relationId = PG_GETARG_OID(0); EnsureLocalTableCanBeTruncated(relationId); + EnsureCoordinatorUnlessTenantSchema(relationId); + TruncateStmt *truncateStmt = makeNode(TruncateStmt); char *relationName = 
generate_qualified_relation_name(relationId); diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index f09d7ced39e..242787159d6 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -1582,6 +1582,20 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands) } +/* + * SetLocalEnableDDLPropagation is simply a C interface for setting + * the following: + * SET LOCAL citus.enable_ddl_propagation = 'on'|'off'; + */ +void +SetLocalEnableDDLPropagation(bool state) +{ + set_config_option("citus.enable_ddl_propagation", state ? "on" : "off", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); +} + + /* * AlterTableInProgress returns true if we're processing an ALTER TABLE command * right now. diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c index 0ffd00ec406..b24745955da 100644 --- a/src/backend/distributed/commands/view.c +++ b/src/backend/distributed/commands/view.c @@ -101,7 +101,7 @@ PreprocessViewStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); return NIL; } @@ -112,7 +112,7 @@ PreprocessViewStmt(Node *node, const char *queryString, * propagate views. * * If view depends on any undistributable object, Citus can not distribute it. In order to - * not to prevent users from creating local views on the coordinator WARNING message will + * not prevent users from creating local views on the local node, a WARNING message will * be sent to the customer about the case instead of erroring out. If no worker nodes exist * at all, view will be created locally without any WARNING message.
* @@ -271,7 +271,7 @@ PreprocessDropViewStmt(Node *node, const char *queryString, ProcessUtilityContex return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); EnsureSequentialMode(OBJECT_VIEW); /* @@ -288,7 +288,7 @@ PreprocessDropViewStmt(Node *node, const char *queryString, ProcessUtilityContex (void *) dropStmtSql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -567,7 +567,7 @@ PreprocessAlterViewStmt(Node *node, const char *queryString, ProcessUtilityConte QualifyTreeNode((Node *) stmt); - EnsureCoordinator(); + EnsurePropagationToCoordinator(); /* reconstruct alter statement in a portable fashion */ const char *alterViewStmtSql = DeparseTreeNode((Node *) stmt); @@ -659,7 +659,7 @@ PreprocessRenameViewStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); /* fully qualify */ QualifyTreeNode(node); @@ -721,7 +721,7 @@ PreprocessAlterViewSchemaStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); QualifyTreeNode((Node *) stmt); diff --git a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c index 95df92f94fa..db7e4f725ff 100644 --- a/src/backend/distributed/executor/citus_custom_scan.c +++ b/src/backend/distributed/executor/citus_custom_scan.c @@ -270,13 +270,16 @@ CitusExecScan(CustomScanState *node) AdaptiveExecutor(scanState); - if (isMultiTaskPlan) + if (!scanState->distributedPlan->disableTrackingQueryCounters) { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); - } - else - { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); + if (isMultiTaskPlan) + { + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + } + else + { + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); + } } scanState->finishedRemoteScan = true; diff --git a/src/backend/distributed/executor/insert_select_executor.c b/src/backend/distributed/executor/insert_select_executor.c index f688484699a..0c6782a3f64 100644 --- a/src/backend/distributed/executor/insert_select_executor.c +++ b/src/backend/distributed/executor/insert_select_executor.c @@ -181,20 +181,23 @@ NonPushableInsertSelectExecScan(CustomScanState *node) targetRelation, binaryFormat); - if (list_length(distSelectTaskList) <= 1) + if (!distSelectPlan->disableTrackingQueryCounters) { - /* - * Probably we will never get here for a repartitioned - * INSERT..SELECT because when the source is a single shard - * table, we should most probably choose to use - * MODIFY_WITH_SELECT_VIA_COORDINATOR, but we still keep this - * here. - */ - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); - } - else - { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + if (list_length(distSelectTaskList) <= 1) + { + /* + * Probably we will never get here for a repartitioned + * INSERT..SELECT because when the source is a single shard + * table, we should most probably choose to use + * MODIFY_WITH_SELECT_VIA_COORDINATOR, but we still keep this + * here. 
+ */ + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); + } + else + { + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + } } /* @@ -217,13 +220,23 @@ NonPushableInsertSelectExecScan(CustomScanState *node) taskList, tupleDest, hasReturning); - if (list_length(taskList) <= 1) - { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); - } - else + if (!distributedPlan->disableTrackingQueryCounters) { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + if (list_length(taskList) <= 1) + { + /* + * Probably we will never get here for a repartitioned + * INSERT..SELECT because when the source is a single shard + * table, we should most probably choose to use + * MODIFY_WITH_SELECT_VIA_COORDINATOR, but we still keep this + * here. + */ + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); + } + else + { + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + } } executorState->es_processed = rowsInserted; @@ -301,13 +314,23 @@ NonPushableInsertSelectExecScan(CustomScanState *node) } } - if (list_length(prunedTaskList) <= 1) - { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); - } - else + if (!distributedPlan->disableTrackingQueryCounters) { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + if (list_length(prunedTaskList) <= 1) + { + /* + * Probably we will never get here for a repartitioned + * INSERT..SELECT because when the source is a single shard + * table, we should most probably choose to use + * MODIFY_WITH_SELECT_VIA_COORDINATOR, but we still keep this + * here. + */ + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); + } + else + { + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + } } } else diff --git a/src/backend/distributed/executor/merge_executor.c b/src/backend/distributed/executor/merge_executor.c index 6ce86c7a4ba..bdbc00b1c15 100644 --- a/src/backend/distributed/executor/merge_executor.c +++ b/src/backend/distributed/executor/merge_executor.c @@ -170,19 +170,22 @@ ExecuteSourceAtWorkerAndRepartition(CitusScanState *scanState) distSourceTaskList, partitionColumnIndex, targetRelation, binaryFormat); - if (list_length(distSourceTaskList) <= 1) + if (!distSourcePlan->disableTrackingQueryCounters) { - /* - * Probably we will never get here for a repartitioned MERGE - * because when the source is a single shard table, we should - * most probably choose to use ExecuteSourceAtCoordAndRedistribution(), - * but we still keep this here. - */ - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); - } - else - { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + if (list_length(distSourceTaskList) <= 1) + { + /* + * Probably we will never get here for a repartitioned MERGE + * because when the source is a single shard table, we should + * most probably choose to use ExecuteSourceAtCoordAndRedistribution(), + * but we still keep this here. 
+ */ + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); + } + else + { + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + } } ereport(DEBUG1, (errmsg("Executing final MERGE on workers using " @@ -213,13 +216,22 @@ ExecuteSourceAtWorkerAndRepartition(CitusScanState *scanState) hasReturning, paramListInfo); - if (list_length(taskList) <= 1) - { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); - } - else + if (!distributedPlan->disableTrackingQueryCounters) { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + if (list_length(taskList) <= 1) + { + /* + * Probably we will never get here for a repartitioned MERGE + * because when the source is a single shard table, we should + * most probably choose to use ExecuteSourceAtCoordAndRedistribution(), + * but we still keep this here. + */ + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); + } + else + { + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + } } executorState->es_processed = rowsMerged; @@ -314,7 +326,7 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState) prunedTaskList = list_concat(prunedTaskList, emptySourceTaskList); } - if (prunedTaskList == NIL) + if (!distributedPlan->disableTrackingQueryCounters && prunedTaskList == NIL) { /* * No task to execute, but we still increment STAT_QUERY_EXECUTION_SINGLE_SHARD @@ -341,13 +353,16 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState) hasReturning, paramListInfo); - if (list_length(prunedTaskList) == 1) + if (!distributedPlan->disableTrackingQueryCounters) { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); - } - else - { - IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + if (list_length(prunedTaskList) == 1) + { + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_SINGLE_SHARD); + } + else + { + IncrementStatCounterForMyDb(STAT_QUERY_EXECUTION_MULTI_SHARD); + } } executorState->es_processed = rowsMerged; diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index eb6bdf111a3..1893e262765 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -998,8 +998,9 @@ IsTaskExecutionAllowed(bool isRemote) * checks yet. * - Users need to explicitly set enable_unsafe_triggers in order to create * triggers on distributed tables. - * - Triggers on Citus local tables should be able to access other Citus local - * tables. + * - Triggers on Citus local tables and distributed schema tables should be able + * to access other Citus local tables and colocated distributed schema tables + * respectively. 
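+ *
+ *   A sketch with hypothetical names, assuming tenant1 is a distributed
+ *   schema created under citus.enable_schema_based_sharding:
+ *
+ *     CREATE TRIGGER record_audit AFTER INSERT ON tenant1.orders
+ *       FOR EACH ROW EXECUTE FUNCTION tenant1.record_audit();
+ *
+ *   where tenant1.record_audit() writes to the colocated table
+ *   tenant1.orders_audit.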
*/ return true; } diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 7934e5d3d20..328f2893f4f 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -105,15 +105,17 @@ static void EnsureObjectMetadataIsSane(int distributionArgumentIndex, static List * GetFunctionDependenciesForObjects(ObjectAddress *objectAddress); static char * SchemaOwnerName(Oid objectId); static bool HasMetadataWorkers(void); -static void CreateShellTableOnWorkers(Oid relationId); -static void CreateTableMetadataOnWorkers(Oid relationId); -static void CreateDependingViewsOnWorkers(Oid relationId); +static void CreateShellTableOnRemoteNodes(Oid relationId); +static void CreateTableMetadataOnRemoteNodes(Oid relationId); +static void CreateDependingViewsOnRemoteNodes(Oid relationId); static void AddTableToPublications(Oid relationId); static NodeMetadataSyncResult SyncNodeMetadataToNodesOptional(void); static bool ShouldSyncTableMetadataInternal(bool hashDistributed, bool citusTableWithNoDistKey); static bool SyncNodeMetadataSnapshotToNode(WorkerNode *workerNode, bool raiseOnError); static void DropMetadataSnapshotOnNode(WorkerNode *workerNode); +static List * IdentitySequenceDependencyCommandListLegacy(Oid targetRelationId); +static void FetchSequenceState(Oid sequenceId, int64 *lastValue, bool *isCalled); static char * CreateSequenceDependencyCommand(Oid relationId, Oid sequenceId, char *columnName); static GrantStmt * GenerateGrantStmtForRights(ObjectType objectType, @@ -265,19 +267,19 @@ start_metadata_sync_to_all_nodes(PG_FUNCTION_ARGS) /* - * SyncCitusTableMetadata syncs citus table metadata to worker nodes with metadata. + * SyncCitusTableMetadata syncs citus table metadata to remote nodes with metadata. * Our definition of metadata includes the shell table and its inter relations with * other shell tables, corresponding pg_dist_object, pg_dist_partiton, pg_dist_shard * and pg_dist_shard placement entries. This function also propagates the views that - * depend on the given relation, to the metadata workers, and adds the relation to + * depend on the given relation, to the remote metadata nodes, and adds the relation to * the appropriate publications. */ void SyncCitusTableMetadata(Oid relationId) { - CreateShellTableOnWorkers(relationId); - CreateTableMetadataOnWorkers(relationId); - CreateInterTableRelationshipOfRelationOnWorkers(relationId); + CreateShellTableOnRemoteNodes(relationId); + CreateTableMetadataOnRemoteNodes(relationId); + CreateInterTableRelationshipOfRelationOnRemoteNodes(relationId); if (!IsTableOwnedByExtension(relationId)) { @@ -286,17 +288,17 @@ SyncCitusTableMetadata(Oid relationId) MarkObjectDistributed(&relationAddress); } - CreateDependingViewsOnWorkers(relationId); + CreateDependingViewsOnRemoteNodes(relationId); AddTableToPublications(relationId); } /* - * CreateDependingViewsOnWorkers takes a relationId and creates the views that depend on - * that relation on workers with metadata. Propagated views are marked as distributed. + * CreateDependingViewsOnRemoteNodes takes a relationId and creates the views that depend on + * that relation on remote nodes with metadata. Propagated views are marked as distributed. 
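+ *
+ * As a sketch (hypothetical names): if view v1 is defined on distributed
+ * table t1, the CREATE VIEW v1 .. and ALTER VIEW v1 OWNER TO .. commands
+ * generated below are sent to every remote node with metadata, after which
+ * v1 is marked distributed via MarkObjectDistributed().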
*/ static void -CreateDependingViewsOnWorkers(Oid relationId) +CreateDependingViewsOnRemoteNodes(Oid relationId) { List *views = GetDependingViews(relationId); @@ -306,7 +308,7 @@ return; } - SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(DISABLE_DDL_PROPAGATION); Oid viewOid = InvalidOid; foreach_declared_oid(viewOid, views) @@ -323,18 +325,18 @@ char *createViewCommand = CreateViewDDLCommand(viewOid); char *alterViewOwnerCommand = AlterViewOwnerCommand(viewOid); - SendCommandToWorkersWithMetadata(createViewCommand); - SendCommandToWorkersWithMetadata(alterViewOwnerCommand); + SendCommandToRemoteNodesWithMetadata(createViewCommand); + SendCommandToRemoteNodesWithMetadata(alterViewOwnerCommand); MarkObjectDistributed(viewAddress); } - SendCommandToWorkersWithMetadata(ENABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(ENABLE_DDL_PROPAGATION); } /* - * AddTableToPublications adds the table to a publication on workers with metadata. + * AddTableToPublications adds the table to a publication on remote nodes with metadata. */ static void AddTableToPublications(Oid relationId) @@ -347,7 +349,7 @@ Oid publicationId = InvalidOid; - SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(DISABLE_DDL_PROPAGATION); foreach_declared_oid(publicationId, publicationIds) { @@ -369,10 +371,10 @@ GetAlterPublicationTableDDLCommand(publicationId, relationId, isAdd); /* send ALTER PUBLICATION .. ADD to workers with metadata */ - SendCommandToWorkersWithMetadata(alterPublicationCommand); + SendCommandToRemoteNodesWithMetadata(alterPublicationCommand); } - SendCommandToWorkersWithMetadata(ENABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(ENABLE_DDL_PROPAGATION); } @@ -460,6 +462,17 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS) { ereport(NOTICE, (errmsg("dropping metadata on the node (%s,%d)", nodeNameString, nodePort))); + + /* + * Note that we don't yet reset the local group id on the node we + * stop syncing metadata to. This is because resetting the local group + * id means setting it to COORDINATOR_GROUP_ID, and we don't yet want it + * to assume that it's the coordinator as we still have it as a worker + * in the metadata. + * + * We reset the local group id only when (and if) we remove the node from + * the metadata, see RemoveNodeFromCluster().
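+ *
+ * A sketch of the intended lifecycle (hypothetical node name and port):
+ *
+ *   SELECT stop_metadata_sync_to_node('worker-1', 5432);
+ *     -- metadata is dropped, but the node keeps its local group id
+ *   SELECT citus_remove_node('worker-1', 5432);
+ *     -- the node is removed and only then is its local group id reset to 0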
+ */ DropMetadataSnapshotOnNode(workerNode); } else @@ -701,8 +714,6 @@ DropMetadataSnapshotOnNode(WorkerNode *workerNode) WorkerDropAllShellTablesCommand(singleTransaction)); dropMetadataCommandList = list_concat(dropMetadataCommandList, NodeMetadataDropCommands()); - dropMetadataCommandList = lappend(dropMetadataCommandList, - LocalGroupIdUpdateCommand(0)); /* remove all dist table and object/table related metadata afterwards */ dropMetadataCommandList = lappend(dropMetadataCommandList, DELETE_ALL_PARTITIONS); @@ -1481,11 +1492,57 @@ DDLCommandsForSequence(Oid sequenceOid, char *ownerName) Oid sequenceTypeOid = sequenceData->seqtypid; char *typeName = format_type_be(sequenceTypeOid); - /* create schema if needed */ - appendStringInfo(wrappedSequenceDef, - WORKER_APPLY_SEQUENCE_COMMAND, - escapedSequenceDef, - quote_literal_cstr(typeName)); + /* + * WORKER_APPLY_SEQUENCE_COMMAND_LEGACY differs from + * WORKER_APPLY_SEQUENCE_COMMAND in that it does not + * accept last_value and is_called params, and does + * not set the initial sequence value when called on + * the coordinator. + * + * The initial value must be set only when creating + * sequence dependencies on the coordinator for + * operations initiated from a worker. In that case, on + * the coordinator, we need to continue after the last + * value used on the worker so the coordinator can safely + * assume the full sequence range. + * + * For operations initiated from the coordinator, this is + * unnecessary since all remote nodes are workers. While + * it would be safe to always use + * WORKER_APPLY_SEQUENCE_COMMAND (the underlying UDF skips + * setting the value when the target node is a worker), we + * use the legacy variant to preserve compatibility with + * mixed-version clusters. + * + * Therefore, for now we use + * WORKER_APPLY_SEQUENCE_COMMAND_LEGACY when the operation + * is initiated from the coordinator. In Citus 15.0, we + * will remove WORKER_APPLY_SEQUENCE_COMMAND_LEGACY and will + * delete the legacy code path, the first branch of the if + * statement below. + */ + if (IsCoordinator()) + { + appendStringInfo(wrappedSequenceDef, + WORKER_APPLY_SEQUENCE_COMMAND_LEGACY, + escapedSequenceDef, + quote_literal_cstr(typeName)); + } + else + { + /* prevent concurrent updates to the sequence until the end of the transaction */ + LockRelationOid(sequenceOid, RowExclusiveLock); + + int64 lastValue = 0; + bool isCalled = false; + FetchSequenceState(sequenceOid, &lastValue, &isCalled); + + appendStringInfo(wrappedSequenceDef, + WORKER_APPLY_SEQUENCE_COMMAND, + escapedSequenceDef, + quote_literal_cstr(typeName), + lastValue, isCalled ? "true" : "false"); + } appendStringInfo(sequenceGrantStmt, "ALTER SEQUENCE %s OWNER TO %s", sequenceName, @@ -1958,12 +2015,103 @@ SequenceDependencyCommandList(Oid relationId) /* - * IdentitySequenceDependencyCommandList generate a command to execute - * a UDF (WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES) on workers to modify the identity - * columns min/max values to produce unique values on workers. + * IdentitySequenceDependencyCommandList, when called from a worker, + * generates a list of commands to execute + * WORKER_ADJUST_IDENTITY_COLUMN_SEQ_SETTINGS for each identity sequence of + * the given relation on remote nodes to i) set identity column min/max + * values to produce unique values on workers and ii) set the sequence + * last_value and is_called on the coordinator to continue after the maximum + * value used so far.
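+ *
+ * Per identity sequence, the generated command is presumably along the lines
+ * of (hypothetical names and values):
+ *   SELECT worker_adjust_identity_column_seq_settings('public.t1_id_seq', 341, true);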
+ * + * When called from the coordinator, we directly use + * IdentitySequenceDependencyCommandListLegacy() and exit. The most + * significant difference between IdentitySequenceDependencyCommandListLegacy() + * and the rest of this function is that the legacy implementation discovers + * identity column sequences on the worker and only sets their min/max + * values, whereas the rest of the function discovers identity column + * sequences on the local node and sends separate commands for each one so + * it can also set last_value and is_called for each sequence when run on the + * coordinator. + * + * The initial value must be set only when creating identity column + * dependencies on the coordinator for operations initiated from a worker. + * In that case, on the coordinator, we need to continue after the value last + * used on the worker so the coordinator can safely assume the full sequence + * range. + * + * For operations initiated from the coordinator, this is unnecessary + * since all remote nodes are workers. While it would be safe to never use + * the legacy code path (the underlying UDF skips setting the value when + * the target node is a worker), we use the legacy variant to preserve + * compatibility with mixed-version clusters. + * + * Therefore, for now we use IdentitySequenceDependencyCommandListLegacy() + * when the operation is initiated from the coordinator. In Citus 15.0, we + * will remove IdentitySequenceDependencyCommandListLegacy() and delete the + * legacy code path, i.e. the first if-statement below. */ List * IdentitySequenceDependencyCommandList(Oid targetRelationId) +{ + if (IsCoordinator()) + { + return IdentitySequenceDependencyCommandListLegacy(targetRelationId); + } + + List *commandList = NIL; + + Relation relation = relation_open(targetRelationId, AccessShareLock); + TupleDesc tupleDescriptor = RelationGetDescr(relation); + + for (int attributeIndex = 0; attributeIndex < tupleDescriptor->natts; + attributeIndex++) + { + Form_pg_attribute attributeForm = TupleDescAttr(tupleDescriptor, + attributeIndex); + + if (attributeForm->attisdropped || !attributeForm->attidentity) + { + continue; + } + + bool missingOk = false; + Oid sequenceId = getIdentitySequence( + identitySequenceRelation_compat(relation), + attributeForm->attnum, + missingOk + ); + + char *qualifiedSequenceName = generate_qualified_relation_name(sequenceId); + + /* prevent concurrent updates to the sequence until the end of the transaction */ + LockRelationOid(sequenceId, RowExclusiveLock); + + int64 lastValue = 0; + bool isCalled = false; + FetchSequenceState(sequenceId, &lastValue, &isCalled); + + StringInfo stringInfo = makeStringInfo(); + appendStringInfo(stringInfo, + WORKER_ADJUST_IDENTITY_COLUMN_SEQ_SETTINGS, + quote_literal_cstr(qualifiedSequenceName), + lastValue, isCalled ? "true" : "false"); + + commandList = lappend(commandList, + makeTableDDLCommandString(stringInfo->data)); + } + + relation_close(relation, NoLock); + + return commandList; +} + + +/* + * IdentitySequenceDependencyCommandListLegacy is the legacy way to update + * identity sequence ranges on workers, see IdentitySequenceDependencyCommandList(). 
+ */ +static List * +IdentitySequenceDependencyCommandListLegacy(Oid targetRelationId) { List *commandList = NIL; @@ -1991,7 +2139,7 @@ IdentitySequenceDependencyCommandList(Oid targetRelationId) char *tableName = generate_qualified_relation_name(targetRelationId); appendStringInfo(stringInfo, - WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES, + WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES_LEGACY, quote_literal_cstr(tableName)); @@ -2004,6 +2152,82 @@ IdentitySequenceDependencyCommandList(Oid targetRelationId) } +/* + * FetchSequenceState fetches the last_value and is_called for the sequence with + * the given oid. + */ +static void +FetchSequenceState(Oid sequenceId, int64 *lastValue, bool *isCalled) +{ + char *qualifiedSequenceName = generate_qualified_relation_name(sequenceId); + + StringInfo query = makeStringInfo(); + appendStringInfo(query, "SELECT last_value, is_called FROM %s", + qualifiedSequenceName); + + bool spiConnected = false; + + PG_TRY(); + { + int spiStatus = SPI_connect(); + if (spiStatus != SPI_OK_CONNECT) + { + elog(ERROR, "SPI_connect failed: %d", spiStatus); + } + + spiConnected = true; + + spiStatus = SPI_execute(query->data, true, 1); + if (spiStatus != SPI_OK_SELECT) + { + elog(ERROR, "SPI_execute failed: %d", spiStatus); + } + + if (SPI_processed != 1 || SPI_tuptable == NULL || + SPI_tuptable->tupdesc == NULL || + SPI_tuptable->tupdesc->natts != 2) + { + elog(ERROR, "could not properly fetch last_value for sequence %s", + qualifiedSequenceName); + } + + bool isNull = false; + + Datum lastValueDatum = SPI_getbinval(SPI_tuptable->vals[0], + SPI_tuptable->tupdesc, + 1, &isNull); + if (isNull) + { + elog(ERROR, "last_value for sequence %s is NULL", qualifiedSequenceName); + } + + *lastValue = DatumGetInt64(lastValueDatum); + + Datum isCalledDatum = SPI_getbinval(SPI_tuptable->vals[0], + SPI_tuptable->tupdesc, + 2, &isNull); + if (isNull) + { + elog(ERROR, "is_called for sequence %s is NULL", qualifiedSequenceName); + } + + *isCalled = DatumGetBool(isCalledDatum); + + SPI_finish(); + spiConnected = false; + } + PG_CATCH(); + { + if (spiConnected) + { + SPI_finish(); + } + PG_RE_THROW(); + } + PG_END_TRY(); +} + + /* * CreateSequenceDependencyCommand generates a query string for calling * worker_record_sequence_dependency on the worker to recreate a sequence->table @@ -2753,11 +2977,11 @@ HasMetadataWorkers(void) /* - * CreateInterTableRelationshipOfRelationOnWorkers create inter table relationship - * for the the given relation id on each worker node with metadata. + * CreateInterTableRelationshipOfRelationOnRemoteNodes creates the inter-table + * relationships for the given relation id on each remote node with metadata.
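+ *
+ * (These are, e.g., the ALTER TABLE .. ADD CONSTRAINT .. FOREIGN KEY and
+ * ALTER TABLE .. ATTACH PARTITION commands between shell tables; see
+ * InterTableRelationshipOfRelationCommandList().)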
*/ void -CreateInterTableRelationshipOfRelationOnWorkers(Oid relationId) +CreateInterTableRelationshipOfRelationOnRemoteNodes(Oid relationId) { /* if the table is owned by an extension we don't create */ bool tableOwnedByExtension = IsTableOwnedByExtension(relationId); @@ -2770,12 +2994,12 @@ CreateInterTableRelationshipOfRelationOnWorkers(Oid relationId) InterTableRelationshipOfRelationCommandList(relationId); /* prevent recursive propagation */ - SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(DISABLE_DDL_PROPAGATION); const char *command = NULL; foreach_declared_ptr(command, commandList) { - SendCommandToWorkersWithMetadata(command); + SendCommandToRemoteNodesWithMetadata(command); } } @@ -2803,63 +3027,85 @@ InterTableRelationshipOfRelationCommandList(Oid relationId) /* - * CreateShellTableOnWorkers creates the shell table on each worker node with metadata + * CreateShellTableOnRemoteNodes creates the shell table on each remote node with metadata * including sequence dependency and truncate triggers. */ static void -CreateShellTableOnWorkers(Oid relationId) +CreateShellTableOnRemoteNodes(Oid relationId) { if (IsTableOwnedByExtension(relationId)) { return; } - List *commandList = list_make1(DISABLE_DDL_PROPAGATION); - + /* 1 - collect commands to be executed on remote workers and execute them */ IncludeSequenceDefaults includeSequenceDefaults = WORKER_NEXTVAL_SEQUENCE_DEFAULTS; IncludeIdentities includeIdentityDefaults = INCLUDE_IDENTITY; - bool creatingShellTableOnRemoteNode = true; List *tableDDLCommands = GetFullTableCreationCommands(relationId, includeSequenceDefaults, includeIdentityDefaults, creatingShellTableOnRemoteNode); + SendCommandToRemoteWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + TableDDLCommand *tableDDLCommand = NULL; foreach_declared_ptr(tableDDLCommand, tableDDLCommands) { Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); - commandList = lappend(commandList, GetTableDDLCommand(tableDDLCommand)); + SendCommandToRemoteWorkersWithMetadata(GetTableDDLCommand(tableDDLCommand)); } - const char *command = NULL; - foreach_declared_ptr(command, commandList) + /* + * 2 - if this is not the coordinator, need to create the shell table on + * the coordinator as well. + * + * The only difference in the commands to be executed on coordinator vs + * remote workers is that while we use WORKER_NEXTVAL_SEQUENCE_DEFAULTS + * for remote workers to set int / smallint sequence defaults, we use + * NEXTVAL_SEQUENCE_DEFAULTS for coordinator to set the defaults to + * nextval(..). + */ + if (!IsCoordinator()) { - SendCommandToWorkersWithMetadata(command); + includeSequenceDefaults = NEXTVAL_SEQUENCE_DEFAULTS; + tableDDLCommands = GetFullTableCreationCommands(relationId, + includeSequenceDefaults, + includeIdentityDefaults, + creatingShellTableOnRemoteNode); + + SendCommandToCoordinator(DISABLE_DDL_PROPAGATION); + + tableDDLCommand = NULL; + foreach_declared_ptr(tableDDLCommand, tableDDLCommands) + { + Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); + SendCommandToCoordinator(GetTableDDLCommand(tableDDLCommand)); + } } } /* - * CreateTableMetadataOnWorkers creates the list of commands needed to create the - * metadata of the given distributed table and sends these commands to all metadata - * workers i.e. workers with hasmetadata=true. 
Before sending the commands, in order + * CreateTableMetadataOnRemoteNodes creates the list of commands needed to + * create the metadata of the given distributed table and sends these commands to all + * remote metadata nodes, i.e., nodes with hasmetadata=true. Before sending the commands, in order * to prevent recursive propagation, DDL propagation on workers are disabled with a * `SET citus.enable_ddl_propagation TO off;` command. */ static void -CreateTableMetadataOnWorkers(Oid relationId) +CreateTableMetadataOnRemoteNodes(Oid relationId) { List *commandList = CitusTableMetadataCreateCommandList(relationId); /* prevent recursive propagation */ - SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(DISABLE_DDL_PROPAGATION); /* send the commands one by one */ const char *command = NULL; foreach_declared_ptr(command, commandList) { - SendCommandToWorkersWithMetadata(command); + SendCommandToRemoteNodesWithMetadata(command); } } @@ -4180,7 +4426,7 @@ SyncNewColocationGroupToNodes(uint32 colocationId, int shardCount, int replicati * We require superuser for all pg_dist_colocation operations because we have * no reasonable way of restricting access. */ - SendCommandToWorkersWithMetadataViaSuperUser(command); + SendCommandToRemoteNodesWithMetadataViaSuperUser(command); } @@ -4357,7 +4603,7 @@ SyncDeleteColocationGroupToNodes(uint32 colocationId) * We require superuser for all pg_dist_colocation operations because we have * no reasonable way of restricting access. */ - SendCommandToWorkersWithMetadataViaSuperUser(command); + SendCommandToRemoteNodesWithMetadataViaSuperUser(command); } diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index c2ffcb5c70c..91b85df8f55 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -1450,13 +1450,13 @@ IsActiveShardPlacement(ShardPlacement *shardPlacement) /* - * IsRemoteShardPlacement returns true if the shard placement is on a remote - * node. + * IsNonCoordShardPlacement returns true if the shard placement is on a node + * other than the coordinator.
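+ *
+ * Note that, unlike the old IsRemoteShardPlacement(), this compares against
+ * COORDINATOR_GROUP_ID rather than the local group id, so a placement on the
+ * coordinator never counts as non-coord, even when this code runs on a
+ * worker.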
*/ bool -IsRemoteShardPlacement(ShardPlacement *shardPlacement) +IsNonCoordShardPlacement(ShardPlacement *shardPlacement) { - return shardPlacement->groupId != GetLocalGroupId(); + return shardPlacement->groupId != COORDINATOR_GROUP_ID; } @@ -1857,7 +1857,7 @@ InsertShardPlacementRowGlobally(uint64 shardId, uint64 placementId, char *insertPlacementCommand = AddPlacementMetadataCommand(shardId, placementId, shardLength, groupId); - SendCommandToWorkersWithMetadata(insertPlacementCommand); + SendCommandToRemoteNodesWithMetadata(insertPlacementCommand); return LoadShardPlacement(shardId, placementId); } @@ -1882,7 +1882,7 @@ InsertShardPlacementRow(uint64 shardId, uint64 placementId, if (placementId == INVALID_PLACEMENT_ID) { - placementId = master_get_new_placementid(NULL); + placementId = GetNextPlacementId(); } values[Anum_pg_dist_placement_placementid - 1] = Int64GetDatum(placementId); values[Anum_pg_dist_placement_shardid - 1] = Int64GetDatum(shardId); @@ -2092,7 +2092,7 @@ DeleteShardPlacementRowGlobally(uint64 placementId) char *deletePlacementCommand = DeletePlacementMetadataCommand(placementId); - SendCommandToWorkersWithMetadata(deletePlacementCommand); + SendCommandToRemoteNodesWithMetadata(deletePlacementCommand); } @@ -2368,7 +2368,7 @@ UpdateNoneDistTableMetadataGlobally(Oid relationId, char replicationModel, replicationModel, colocationId, autoConverted); - SendCommandToWorkersWithMetadata(metadataCommand); + SendCommandToRemoteNodesWithMetadata(metadataCommand); } } diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index e662dca8d3a..6174027a4fb 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -58,6 +58,7 @@ #include "distributed/shardinterval_utils.h" #include "distributed/shared_connection_stats.h" #include "distributed/string_utils.h" +#include "distributed/tenant_schema_metadata.h" #include "distributed/transaction_recovery.h" #include "distributed/version_compat.h" #include "distributed/worker_manager.h" @@ -109,6 +110,7 @@ static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport, NodeMetadata *nodeMetadata); static void DeleteNodeRow(char *nodename, int32 nodeport); static void BlockDistributedQueriesOnMetadataNodes(void); +static char * LockPgDistNodeCommand(LOCKMODE lockMode); static WorkerNode * TupleToWorkerNode(Relation pgDistNode, TupleDesc tupleDescriptor, HeapTuple heapTuple); static bool NodeIsLocal(WorkerNode *worker); @@ -680,8 +682,49 @@ BlockDistributedQueriesOnMetadataNodes(void) /* only superuser can disable node */ Assert(superuser()); - SendCommandToWorkersWithMetadata( - "LOCK TABLE pg_catalog.pg_dist_node IN EXCLUSIVE MODE;"); + SendCommandToWorkersWithMetadata(LockPgDistNodeCommand(ExclusiveLock)); +} + + +/* + * LockPgDistNodeOnCoordinatorViaSuperUser locks pg_dist_node on the coordinator + * via a superuser connection. This is mostly useful when we're on a worker and + * need to acquire a lock on pg_dist_node on the coordinator. + * + * Note that only a superuser can lock pg_dist_node, so we always send the + * command using a superuser connection. + * + * Also note that we should not have acquired a stronger lock on pg_dist_node + * earlier in the transaction, and we should not acquire one later using a + * non-superuser connection, because that would lead to a self-deadlock.
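+ *
+ * A sketch of that self-deadlock (hypothetical scenario): if this transaction
+ * had already run, say, LOCK TABLE pg_dist_node IN EXCLUSIVE MODE over a
+ * regular connection to the coordinator, the superuser connection used here
+ * is a different backend, so its lock request would queue behind that
+ * uncommitted ExclusiveLock and never be granted.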
+ * + * Today, none of the operations that can be initiated from a worker + * directly acquires such a strong lock on pg_dist_node on the coordinator, + * nor causes the coordinator to acquire one internally. However, if allowing + * a new feature from workers causes such an issue in the future, then we + * might want to consider introducing a UDF that executes such a LOCK command + * by temporarily escalating privileges to superuser. That said, in practice + * it would be difficult to safely restrict the usage of such a UDF, so for + * now we choose to rely on a superuser connection instead. + */ +void +LockPgDistNodeOnCoordinatorViaSuperUser(LOCKMODE lockMode) +{ + SendCommandToCoordinatorViaSuperUser(LockPgDistNodeCommand(lockMode)); +} + + +/* + * LockPgDistNodeCommand returns the command to acquire the lock on pg_dist_node + * with the given lock mode. + */ +static char * +LockPgDistNodeCommand(LOCKMODE lockMode) +{ + StringInfo lockCommand = makeStringInfo(); + appendStringInfo(lockCommand, "LOCK TABLE pg_catalog.pg_dist_node IN %s MODE;", + LockModeToLockModeText(lockMode)); + return lockCommand->data; } @@ -2377,6 +2420,11 @@ RemoveNodeFromCluster(char *nodeName, int32 nodePort) DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId, localOnly); + /* reset local group id for the node to be removed */ + char *updateLocalGroupIdCommand = LocalGroupIdUpdateCommand(0); + SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction( + nodeName, nodePort, CurrentUserName(), list_make1(updateLocalGroupIdCommand)); + /* * Secondary nodes are read-only, never 2PC is used. * Hence, no items can be inserted to pg_dist_transaction @@ -3170,6 +3218,25 @@ EnsurePropagationToCoordinator(void) } +/* + * EnsureCoordinatorUnlessTenantSchema ensures propagation to the coordinator + * if the relation belongs to a tenant schema. Otherwise, it checks whether we're + * on the coordinator. + */ +void +EnsureCoordinatorUnlessTenantSchema(Oid relationId) +{ + if (IsTenantSchema(get_rel_namespace(relationId))) + { + EnsurePropagationToCoordinator(); + } + else + { + EnsureCoordinator(); + } +} + + /* * EnsureCoordinatorIsInMetadata checks whether the coordinator is added to the * metadata, which is required for many operations. diff --git a/src/backend/distributed/operations/create_shards.c b/src/backend/distributed/operations/create_shards.c index 1553de92f33..3c2b65f40a3 100644 --- a/src/backend/distributed/operations/create_shards.c +++ b/src/backend/distributed/operations/create_shards.c @@ -410,7 +410,17 @@ CreateSingleShardTableShardWithRoundRobinPolicy(Oid relationId, uint32 colocatio * * Also take a RowShareLock on pg_dist_node to disallow concurrent * node list changes that require an exclusive lock. + * + * If we're on a worker, first acquire the lock on the coordinator via + * the remote metadata connection to the coordinator as superuser. Note + * that we'll acquire the lock on the local node as well via + * DistributedTablePlacementNodeList().
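+ *
+ * Concretely, the coordinator connection runs the command built by
+ * LockPgDistNodeCommand(RowShareLock), roughly:
+ *   LOCK TABLE pg_catalog.pg_dist_node IN ROW SHARE MODE;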
*/ + if (!IsCoordinator()) + { + LockPgDistNodeOnCoordinatorViaSuperUser(RowShareLock); + } + List *workerNodeList = DistributedTablePlacementNodeList(RowShareLock); workerNodeList = SortList(workerNodeList, CompareWorkerNodes); diff --git a/src/backend/distributed/operations/delete_protocol.c b/src/backend/distributed/operations/delete_protocol.c index 70080f6378c..c09fd242263 100644 --- a/src/backend/distributed/operations/delete_protocol.c +++ b/src/backend/distributed/operations/delete_protocol.c @@ -130,7 +130,11 @@ citus_drop_all_shards(PG_FUNCTION_ARGS) PG_RETURN_INT32(-1); } - EnsureCoordinator(); + /* + * Today we support DROP from workers only if the table is a + * distributed-schema table, but it's okay not to ensure this here. + */ + EnsurePropagationToCoordinator(); CheckTableSchemaNameForDrop(relationId, &schemaName, &relationName); /* @@ -241,9 +245,6 @@ DropShards(Oid relationId, char *schemaName, char *relationName, */ int32 localGroupId = GetLocalGroupId(); - /* DROP table commands are currently only supported from the coordinator */ - Assert(localGroupId == COORDINATOR_GROUP_ID); - Use2PCForCoordinatedTransaction(); List *dropTaskList = DropTaskList(relationId, schemaName, relationName, @@ -270,8 +271,7 @@ DropShards(Oid relationId, char *schemaName, char *relationName, * delete the shard placement metadata and skip dropping the shard for now. */ bool skipIfDropSchemaOrDBInProgress = isLocalShardPlacement && - DropSchemaOrDBInProgress() && - localGroupId == COORDINATOR_GROUP_ID; + DropSchemaOrDBInProgress(); /* * We want to send commands to drop shards when both diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index 0a7760bd179..5dcc012cd69 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -60,10 +60,12 @@ #include "distributed/coordinator_protocol.h" #include "distributed/deparser.h" #include "distributed/listutils.h" +#include "distributed/lock_graph.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/namespace_utils.h" #include "distributed/pg_dist_shard.h" +#include "distributed/remote_commands.h" #include "distributed/shared_library_init.h" #include "distributed/version_compat.h" #include "distributed/worker_manager.h" @@ -74,6 +76,10 @@ int ShardReplicationFactor = 1; /* desired replication factor for shards */ int NextShardId = 0; int NextPlacementId = 0; +static int64 GetNextShardIdFromNode(WorkerNode *node); +static uint64 GetNextShardIdInternal(void); +static int64 GetNextPlacementIdFromNode(WorkerNode *node); +static uint64 GetNextPlacementIdInternal(void); static void GatherIndexAndConstraintDefinitionListExcludingReplicaIdentity(Form_pg_index indexForm, List ** @@ -189,7 +195,84 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS) /* - * master_get_new_shardid is a user facing wrapper function around GetNextShardId() + * GetNextShardId retrieves the next shard id from the local node if we're + * on the coordinator, or from the coordinator otherwise. + * + * In the latter case, it throws an error if the coordinator is not in the metadata. + */ +uint64 +GetNextShardId(void) +{ + uint64 shardId = INVALID_SHARD_ID; + if (IsCoordinator()) + { + shardId = GetNextShardIdInternal(); + } + else + { + /* + * If we're not on the coordinator, retrieve the next id from the + * coordinator node.
Although all nodes have the sequence, we don't + * synchronize the sequences that are part of the Citus metadata + * across nodes, so we need to get the next value from the + * coordinator. + * + * Note that before this point, we should have already verified + * that the coordinator is added to the metadata. + */ + WorkerNode *coordinator = CoordinatorNodeIfAddedAsWorkerOrError(); + shardId = GetNextShardIdFromNode(coordinator); + } + + return shardId; +} + + +/* + * GetNextShardIdFromNode gets the next shard id from the given + * node by calling the pg_catalog.master_get_new_shardid() function. + */ +static int64 +GetNextShardIdFromNode(WorkerNode *node) +{ + const char *nodeName = node->workerName; + int nodePort = node->workerPort; + uint32 connectionFlags = 0; + MultiConnection *connection = GetNodeConnection(connectionFlags, nodeName, nodePort); + + int querySent = SendRemoteCommand(connection, + "SELECT pg_catalog.master_get_new_shardid();"); + if (querySent == 0) + { + ReportConnectionError(connection, ERROR); + } + + bool raiseInterrupts = true; + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); + if (!IsResponseOK(result)) + { + ReportResultError(connection, result, ERROR); + } + + int64 rowCount = PQntuples(result); + int64 colCount = PQnfields(result); + if (rowCount != 1 || colCount != 1) + { + ereport(ERROR, (errmsg("unexpected result from the node when getting " + "next shard id"))); + } + + int64 shardId = ParseIntField(result, 0, 0); + + PQclear(result); + ForgetResults(connection); + + return shardId; +} + + +/* + * master_get_new_shardid is a user facing wrapper function around GetNextShardIdInternal() * which allocates and returns a unique shardId for the shard to be created. * * NB: This can be called by any user; for now we have decided that that's @@ -202,7 +285,7 @@ master_get_new_shardid(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); EnsureCoordinator(); - uint64 shardId = GetNextShardId(); + uint64 shardId = GetNextShardIdInternal(); Datum shardIdDatum = Int64GetDatum(shardId); PG_RETURN_DATUM(shardIdDatum); @@ -210,15 +293,15 @@ /* - * GetNextShardId allocates and returns a unique shardId for the shard to be + * GetNextShardIdInternal allocates and returns a unique shardId for the shard to be * created. This allocation occurs both in shared memory and in write ahead * logs; writing to logs avoids the risk of having shardId collisions. * * Please note that the caller is still responsible for finalizing shard data * and the shardId with the master node. */ -uint64 -GetNextShardId() +static uint64 +GetNextShardIdInternal(void) { Oid savedUserId = InvalidOid; int savedSecurityContext = 0; @@ -257,9 +340,86 @@ } +/* + * GetNextPlacementId retrieves the next placement id from the local node if + * we're on the coordinator, or from the coordinator otherwise. + * + * In the latter case, it throws an error if the coordinator is not in the metadata. + */ +uint64 +GetNextPlacementId(void) +{ + uint64 placementId = INVALID_PLACEMENT_ID; + if (IsCoordinator()) + { + placementId = GetNextPlacementIdInternal(); + } + else + { + /* + * If we're not on the coordinator, retrieve the next id from the + * coordinator node. Although all nodes have the sequence, we don't + * synchronize the sequences that are part of the Citus metadata + * across nodes, so we need to get the next value from the + * coordinator.
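+ *
+ * Concretely, the query that GetNextPlacementIdFromNode() sends over
+ * the coordinator connection is simply:
+ *
+ *   SELECT pg_catalog.master_get_new_placementid();
+ *
+ * and the single bigint value it returns becomes the new placement id.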
+ * + * Note that before this point, we should have already verified + * that the coordinator is added to the metadata. + */ + WorkerNode *coordinator = CoordinatorNodeIfAddedAsWorkerOrError(); + placementId = GetNextPlacementIdFromNode(coordinator); + } + + return placementId; +} + + +/* + * GetNextPlacementIdFromNode gets the next placement id from the given + * node by calling the pg_catalog.master_get_new_placementid() function. + */ +static int64 +GetNextPlacementIdFromNode(WorkerNode *node) +{ + const char *nodeName = node->workerName; + int nodePort = node->workerPort; + uint32 connectionFlags = 0; + MultiConnection *connection = GetNodeConnection(connectionFlags, nodeName, nodePort); + + int querySent = SendRemoteCommand(connection, + "SELECT pg_catalog.master_get_new_placementid();"); + if (querySent == 0) + { + ReportConnectionError(connection, ERROR); + } + + bool raiseInterrupts = true; + PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); + if (!IsResponseOK(result)) + { + ReportResultError(connection, result, ERROR); + } + + int64 rowCount = PQntuples(result); + int64 colCount = PQnfields(result); + if (rowCount != 1 || colCount != 1) + { + ereport(ERROR, (errmsg("unexpected result from the node when getting " + "next placement id"))); + } + + int64 placementId = ParseIntField(result, 0, 0); + + PQclear(result); + ForgetResults(connection); + + return placementId; +} + + /* * master_get_new_placementid is a user facing wrapper function around - * GetNextPlacementId() which allocates and returns a unique placement id for the + * GetNextPlacementIdInternal() which allocates and returns a unique placement id for the * placement to be created. * * NB: This can be called by any user; for now we have decided that that's @@ -272,7 +432,7 @@ master_get_new_placementid(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); EnsureCoordinator(); - uint64 placementId = GetNextPlacementId(); + uint64 placementId = GetNextPlacementIdInternal(); Datum placementIdDatum = Int64GetDatum(placementId); PG_RETURN_DATUM(placementIdDatum); @@ -280,7 +440,7 @@ /* - * GetNextPlacementId allocates and returns a unique placementId for + * GetNextPlacementIdInternal allocates and returns a unique placementId for * the placement to be created. This allocation occurs both in shared memory * and in write ahead logs; writing to logs avoids the risk of having placementId * collisions. @@ -289,8 +449,8 @@ master_get_new_placementid(PG_FUNCTION_ARGS) * ok. We might want to restrict this to users part of a specific role or such * at some later point.
*/ -uint64 -GetNextPlacementId(void) +static uint64 +GetNextPlacementIdInternal(void) { Oid savedUserId = InvalidOid; int savedSecurityContext = 0; diff --git a/src/backend/distributed/operations/replicate_none_dist_table_shard.c b/src/backend/distributed/operations/replicate_none_dist_table_shard.c index aa48b488a2f..56571d00ddf 100644 --- a/src/backend/distributed/operations/replicate_none_dist_table_shard.c +++ b/src/backend/distributed/operations/replicate_none_dist_table_shard.c @@ -17,49 +17,72 @@ #include "distributed/adaptive_executor.h" #include "distributed/commands.h" +#include "distributed/commands/multi_copy.h" #include "distributed/commands/utility_hook.h" #include "distributed/coordinator_protocol.h" #include "distributed/deparse_shard_query.h" #include "distributed/listutils.h" +#include "distributed/multi_executor.h" +#include "distributed/multi_partitioning_utils.h" #include "distributed/replicate_none_dist_table_shard.h" #include "distributed/shard_utils.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" +#include "distributed/worker_shard_copy.h" +static PlannedStmt * PlanReadCopyableColumnsFromTableQuery(Oid relationId); +static void CopyFromQueryIntoNoneDistTable(PlannedStmt *selectPlan, + Oid noneDistTableId); static void CreateForeignKeysFromReferenceTablesOnShards(Oid noneDistTableId); static Oid ForeignConstraintGetReferencingTableId(const char *queryString); static void EnsureNoneDistTableWithCoordinatorPlacement(Oid noneDistTableId); -static void SetLocalEnableManualChangesToShard(bool state); /* - * NoneDistTableReplicateCoordinatorPlacement replicates local (presumably - * coordinator) shard placement of given none-distributed table to given + * NoneDistTableReplicateCoordinatorPlacement replicates the coordinator + * shard placement of the given none-distributed table to the given * target nodes and inserts records for new placements into pg_dist_placement. */ void NoneDistTableReplicateCoordinatorPlacement(Oid noneDistTableId, List *targetNodeList) { - EnsureCoordinator(); + EnsurePropagationToCoordinator(); EnsureNoneDistTableWithCoordinatorPlacement(noneDistTableId); /* - * We don't expect callers try to replicate the shard to remote nodes - * if some of the remote nodes have a placement for the shard already. + * We don't expect callers to try to replicate the shard to worker nodes + * if some of the worker nodes have a placement for the shard already. */ int64 shardId = GetFirstShardId(noneDistTableId); - List *remoteShardPlacementList = + List *nonCoordShardPlacementList = FilterShardPlacementList(ActiveShardPlacementList(shardId), - IsRemoteShardPlacement); - if (list_length(remoteShardPlacementList) > 0) + IsNonCoordShardPlacement); + if (list_length(nonCoordShardPlacementList) > 0) { - ereport(ERROR, (errmsg("table already has a remote shard placement"))); + ereport(ERROR, (errmsg("table already has a shard placement on a worker"))); } uint64 shardLength = ShardLength(shardId); + /* + * If it's not a partitioned table, then plan the query to read data + * from the coordinator placement. We plan the query before inserting + * new placements to force the planner to consider only the coordinator + * placement. + * + * We skip copying from partitioned tables because the data will be + * copied from each partition into that partition's shards. To signal + * that, we leave readCoordPlacementPlan as null when that's the case.
+ */ + PlannedStmt *readCoordPlacementPlan = NULL; + if (!PartitionedTable(noneDistTableId)) + { + readCoordPlacementPlan = + PlanReadCopyableColumnsFromTableQuery(noneDistTableId); + } + /* insert new placements to pg_dist_placement */ List *insertedPlacementList = NIL; WorkerNode *targetNode = NULL; @@ -79,21 +102,27 @@ NoneDistTableReplicateCoordinatorPlacement(Oid noneDistTableId, useExclusiveConnection); /* fetch coordinator placement before deleting it */ - Oid localPlacementTableId = GetTableLocalShardOid(noneDistTableId, shardId); ShardPlacement *coordinatorPlacement = linitial(ActiveShardPlacementListOnGroup(shardId, COORDINATOR_GROUP_ID)); /* - * CreateForeignKeysFromReferenceTablesOnShards and CopyFromLocalTableIntoDistTable + * CreateForeignKeysFromReferenceTablesOnShards and CopyFromQueryIntoNoneDistTable * need to ignore the local placement, hence we temporarily delete it before * calling them. */ DeleteShardPlacementRowGlobally(coordinatorPlacement->placementId); - /* and copy data from local placement to new placements */ - CopyFromLocalTableIntoDistTable( - localPlacementTableId, noneDistTableId - ); + /* and copy data from local placement to new placements, if needed */ + if (readCoordPlacementPlan) + { + /* + * Note that today the callers of this function already hold an + * exclusive lock on the provided table. In the future, if a caller + * does not hold such a lock, here we need to block writes to the + * shard placement on the coordinator before copying data. + */ + CopyFromQueryIntoNoneDistTable(readCoordPlacementPlan, noneDistTableId); + } /* * CreateShardsOnWorkers only creates the foreign keys where given relation @@ -116,12 +145,12 @@ NoneDistTableReplicateCoordinatorPlacement(Oid noneDistTableId, /* * NoneDistTableDeleteCoordinatorPlacement deletes pg_dist_placement record for - * local (presumably coordinator) shard placement of given none-distributed table. + * the coordinator shard placement of given none-distributed table. */ void NoneDistTableDeleteCoordinatorPlacement(Oid noneDistTableId) { - EnsureCoordinator(); + EnsurePropagationToCoordinator(); EnsureNoneDistTableWithCoordinatorPlacement(noneDistTableId); int64 shardId = GetFirstShardId(noneDistTableId); @@ -130,41 +159,25 @@ NoneDistTableDeleteCoordinatorPlacement(Oid noneDistTableId) ShardPlacement *coordinatorPlacement = linitial(ActiveShardPlacementListOnGroup(shardId, COORDINATOR_GROUP_ID)); - /* remove the old placement from metadata of local node, i.e., coordinator */ + /* remove the old placement from metadata */ DeleteShardPlacementRowGlobally(coordinatorPlacement->placementId); } /* - * NoneDistTableDropCoordinatorPlacementTable drops local (presumably coordinator) + * NoneDistTableDropCoordinatorPlacementTable drops the coordinator * shard placement table of given none-distributed table. */ void NoneDistTableDropCoordinatorPlacementTable(Oid noneDistTableId) { - EnsureCoordinator(); + EnsurePropagationToCoordinator(); if (HasDistributionKey(noneDistTableId)) { ereport(ERROR, (errmsg("table is not a none-distributed table"))); } - /* - * We undistribute Citus local tables that are not chained with any reference - * tables via foreign keys at the end of the utility hook. - * Here we temporarily set the related GUC to off to disable the logic for - * internally executed DDL's that might invoke this mechanism unnecessarily. - * - * We also temporarily disable citus.enable_manual_changes_to_shards GUC to - * allow given command to modify shard. 
Note that we disable it only for - local session because changes made to shards are allowed for Citus internal - backends anyway. - */ - int saveNestLevel = NewGUCNestLevel(); - - SetLocalEnableLocalReferenceForeignKeys(false); - SetLocalEnableManualChangesToShard(true); - StringInfo dropShardCommand = makeStringInfo(); int64 shardId = GetFirstShardId(noneDistTableId); ShardInterval *shardInterval = LoadShardInterval(shardId); @@ -176,7 +189,22 @@ task->taskId = INVALID_TASK_ID; task->taskType = DDL_TASK; task->replicationModel = REPLICATION_MODEL_INVALID; - SetTaskQueryString(task, dropShardCommand->data); + + /* + * We undistribute Citus local tables that are not chained with any reference + * tables via foreign keys at the end of the utility hook. + * So we need to temporarily set the related GUC to off to disable the logic for + * internally executed DDL's that might invoke this mechanism unnecessarily. + * + * We also temporarily disable the citus.enable_manual_changes_to_shards GUC to + * allow the given command to modify the shard. + */ + List *taskQueryStringList = list_make3( + "SET LOCAL citus.enable_local_reference_table_foreign_keys TO OFF;", + "SET LOCAL citus.enable_manual_changes_to_shards TO ON;", + dropShardCommand->data + ); + SetTaskQueryStringList(task, taskQueryStringList); ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement); SetPlacementNodeMetadata(targetPlacement, CoordinatorNodeIfAddedAsWorkerOrError()); @@ -185,8 +213,83 @@ bool localExecutionSupported = true; ExecuteUtilityTaskList(list_make1(task), localExecutionSupported); +} + + +/* + * PlanReadCopyableColumnsFromTableQuery creates a plan to read + * copyable columns from the given relation. + */ +static PlannedStmt * +PlanReadCopyableColumnsFromTableQuery(Oid relationId) +{ + Relation relation = RelationIdGetRelation(relationId); - AtEOXact_GUC(true, saveNestLevel); + StringInfo queryString = makeStringInfo(); + appendStringInfo(queryString, "SELECT %s FROM %s", + CopyableColumnNamesFromTupleDesc(RelationGetDescr(relation)), + generate_qualified_relation_name(relationId)); + + RelationClose(relation); + + Query *query = ParseQueryString(queryString->data, NULL, 0); + return planner(query, queryString->data, 0, NULL); +} + + +/* + * CopyFromQueryIntoNoneDistTable executes the given query and copies the data + * into the shard placements of the given none-distributed table. + * + * We use CitusCopyDestReceiver to pass the tuples to the relevant shard + * placements of the none-distributed table, which opens connections on + * demand and starts a COPY for each shard placement that will receive data. + * + * For the execution of the SELECT query itself, we go through the executor. + * + * This way, for both the query execution and the data copying, we correctly + * decide between local and remote execution when accessing shard placements + * for reads and writes.
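+ *
+ * As a hypothetical example (the table name and columns are made up): for
+ * a table public.t(a int, b int GENERATED ALWAYS AS (a * 2) STORED), the
+ * statement planned earlier by PlanReadCopyableColumnsFromTableQuery()
+ * skips the generated column and reads only:
+ *
+ *   SELECT a FROM public.t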
+ */ +static void +CopyFromQueryIntoNoneDistTable(PlannedStmt *selectPlan, Oid noneDistTableId) +{ + if (HasDistributionKey(noneDistTableId)) + { + ereport(ERROR, (errmsg("table is not a none-distributed table"))); + } + + Relation relation = RelationIdGetRelation(noneDistTableId); + + TupleDesc tupleDescriptor = RelationGetDescr(relation); + List *copyableColumnNameList = + CopyablePlainColumnNameListFromTupleDesc(tupleDescriptor); + + RelationClose(relation); + + EState *estate = CreateExecutorState(); + + const bool nonPublishableData = false; + + /* safe to pass INVALID_PARTITION_COLUMN_INDEX for a none-dist table */ + const int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX; + + /* we don't track query counters when copying data into shards */ + const bool trackQueryCounters = false; + DestReceiver *copyDest = + (DestReceiver *) CreateCitusCopyDestReceiver(noneDistTableId, + copyableColumnNameList, + partitionColumnIndex, + estate, NULL, nonPublishableData, + trackQueryCounters); + + /* we don't track query counters when reading data for copying into shards */ + DisableTrackingQueryCountersForPlannedStmt(selectPlan); + + ExecutePlanIntoDestReceiver(selectPlan, NULL, copyDest); + + FreeExecutorState(estate); } @@ -198,7 +301,7 @@ NoneDistTableDropCoordinatorPlacementTable(Oid noneDistTableId) static void CreateForeignKeysFromReferenceTablesOnShards(Oid noneDistTableId) { - EnsureCoordinator(); + EnsurePropagationToCoordinator(); if (HasDistributionKey(noneDistTableId)) { @@ -287,17 +390,3 @@ EnsureNoneDistTableWithCoordinatorPlacement(Oid noneDistTableId) ereport(ERROR, (errmsg("table does not have a coordinator placement"))); } } - - -/* - * SetLocalEnableManualChangesToShard locally enables - * citus.enable_manual_changes_to_shards GUC. - */ -static void -SetLocalEnableManualChangesToShard(bool state) -{ - set_config_option("citus.enable_manual_changes_to_shards", - state ? "on" : "off", - (superuser() ? 
PGC_SUSET : PGC_USERSET), PGC_S_SESSION, - GUC_ACTION_LOCAL, true, 0, false); -} diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index 5d1a5829dc5..3f85681e185 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -296,6 +296,9 @@ static int64 RebalanceTableShardsBackground(RebalanceOptions *options, Oid bool ParallelTransferReferenceTables, bool ParallelTransferColocatedShards); static void AcquireRebalanceColocationLock(Oid relationId, const char *operationName); +static char * AcquirePlacementColocationLockCommand(char *lockIdStr, int lockMode); +static LockAcquireResult AcquirePlacementColocationLockLocally(int64 lockId, + int lockMode); static void ExecutePlacementUpdates(List *placementUpdateList, Oid shardReplicationModeOid, char *noticeOperation); static float4 CalculateUtilization(float4 totalCost, float4 capacity); @@ -333,6 +336,7 @@ static void UpdateShardMoveDependencies(PlacementUpdateEvent *move, uint64 coloc ShardMoveDependencies shardMoveDependencies); /* declarations for dynamic loading */ +PG_FUNCTION_INFO_V1(citus_internal_acquire_placement_colocation_lock); PG_FUNCTION_INFO_V1(rebalance_table_shards); PG_FUNCTION_INFO_V1(replicate_table_shards); PG_FUNCTION_INFO_V1(get_rebalance_table_shards_plan); @@ -809,25 +813,96 @@ AcquireRebalanceColocationLock(Oid relationId, const char *operationName) * AcquirePlacementColocationLock tries to acquire a lock for * rebalance/replication while moving/copying the placement. If this * is not possible, it fails instantly because this means - another move/copy is currently happening. This would really mess up planning. + another move/copy is currently happening. This would really mess + up planning. + * + * If we're on a worker, we acquire the lock on the coordinator via + * the remote metadata connection to the coordinator.
*/ void AcquirePlacementColocationLock(Oid relationId, int lockMode, const char *operationName) { - uint32 lockId = relationId; - LOCKTAG tag; - - CitusTableCacheEntry *citusTableCacheEntry = GetCitusTableCacheEntry(relationId); - if (citusTableCacheEntry->colocationId != INVALID_COLOCATION_ID) + LockAcquireResult result = LOCKACQUIRE_NOT_AVAIL; + if (IsCoordinator()) { - lockId = citusTableCacheEntry->colocationId; + uint32 lockId = relationId; + CitusTableCacheEntry *citusTableCacheEntry = GetCitusTableCacheEntry(relationId); + if (citusTableCacheEntry->colocationId != INVALID_COLOCATION_ID) + { + lockId = citusTableCacheEntry->colocationId; + } + + result = AcquirePlacementColocationLockLocally(lockId, lockMode); } + else + { + UseCoordinatedTransaction(); + Use2PCForCoordinatedTransaction(); - SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION(tag, (int64) lockId); + WorkerNode *coordinator = CoordinatorNodeIfAddedAsWorkerOrError(); + MultiConnection *connection = + GetNodeConnection(REQUIRE_METADATA_CONNECTION, + coordinator->workerName, + coordinator->workerPort); - LockAcquireResult lockAcquired = LockAcquire(&tag, lockMode, false, true); - if (!lockAcquired) + if (PQstatus(connection->pgConn) != CONNECTION_OK) + { + ReportConnectionError(connection, WARNING); + ereport(ERROR, (errmsg("could not connect to coordinator to acquire " + "placement colocation lock required to %s %s", + operationName, + generate_qualified_relation_name(relationId)), + errdetail("The operation requires connectivity to " + "coordinator when running from a worker."))); + } + + MarkRemoteTransactionCritical(connection); + + RemoteTransactionBeginIfNecessary(connection); + + char *lockIdStr = NULL; + CitusTableCacheEntry *citusTableCacheEntry = GetCitusTableCacheEntry(relationId); + if (citusTableCacheEntry->colocationId != INVALID_COLOCATION_ID) + { + lockIdStr = psprintf("%u", citusTableCacheEntry->colocationId); + } + else + { + char *quotedQualifiedRelationName = + quote_literal_cstr(generate_qualified_relation_name(relationId)); + lockIdStr = psprintf("%s::regclass::oid", quotedQualifiedRelationName); + } + + char *command = AcquirePlacementColocationLockCommand(lockIdStr, lockMode); + int querySent = SendRemoteCommand(connection, command); + if (querySent == 0) + { + ReportConnectionError(connection, ERROR); + } + + bool raiseInterrupts = true; + PGresult *remoteResult = GetRemoteCommandResult(connection, raiseInterrupts); + if (!IsResponseOK(remoteResult)) + { + ReportResultError(connection, remoteResult, ERROR); + } + + int64 rowCount = PQntuples(remoteResult); + int64 colCount = PQnfields(remoteResult); + if (rowCount != 1 || colCount != 1) + { + ereport(ERROR, (errmsg("unexpected result from the coordinator when " + "acquiring placement colocation lock"))); + } + + result = ParseIntField(remoteResult, 0, 0); + + PQclear(remoteResult); + ForgetResults(connection); + } + + if (!result) { ereport(ERROR, (errmsg("could not acquire the lock required to %s %s", operationName, @@ -841,6 +916,65 @@ AcquirePlacementColocationLock(Oid relationId, int lockMode, } +/* + * AcquirePlacementColocationLockCommand returns a command to call + * citus_internal_acquire_placement_colocation_lock(). 
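+ *
+ * For example, with a made-up colocation id of 4 and ExclusiveLock (whose
+ * numeric lock mode is 7 in Postgres), the generated command would be:
+ *
+ *   SELECT citus_internal.acquire_placement_colocation_lock(4, 7)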
+ */ +static char * +AcquirePlacementColocationLockCommand(char *lockIdStr, int lockMode) +{ + StringInfo command = makeStringInfo(); + appendStringInfo(command, + "SELECT citus_internal.acquire_placement_colocation_lock(%s, %d)", + lockIdStr, lockMode); + return command->data; +} + + +/* + * citus_internal_acquire_placement_colocation_lock calls + * AcquirePlacementColocationLockLocally(). + */ +Datum +citus_internal_acquire_placement_colocation_lock(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + PG_ENSURE_ARGNOTNULL(0, "lock_id"); + int64 lockId = PG_GETARG_INT64(0); + if (lockId < 0) + { + ereport(ERROR, (errmsg("invalid lock id %ld", lockId))); + } + + PG_ENSURE_ARGNOTNULL(1, "lock_mode"); + int lockModeInt = PG_GETARG_INT32(1); + LOCKMODE lockMode = IntToLockMode(lockModeInt); + + LockAcquireResult result = + AcquirePlacementColocationLockLocally(lockId, lockMode); + PG_RETURN_INT32(result); +} + + +/* + * AcquirePlacementColocationLockLocally tries to acquire a lock for + * rebalance/replication while moving/copying the placement with the given + * lock id. + * + * If the Citus table belongs to a colocation group, the lock id will be + * the colocation id of the group, otherwise it will be the relation + * id of the table. + */ +static LockAcquireResult +AcquirePlacementColocationLockLocally(int64 lockId, int lockMode) +{ + LOCKTAG tag; + SET_LOCKTAG_REBALANCE_PLACEMENT_COLOCATION(tag, lockId); + return LockAcquire(&tag, lockMode, false, true); +} + + /* * GetResponsiveWorkerList returns a List of workers that respond to new * connection requests. diff --git a/src/backend/distributed/operations/worker_node_manager.c b/src/backend/distributed/operations/worker_node_manager.c index 8a4245ca0a0..442441211e3 100644 --- a/src/backend/distributed/operations/worker_node_manager.c +++ b/src/backend/distributed/operations/worker_node_manager.c @@ -43,6 +43,7 @@ int MaxWorkerNodesTracked = 2048; /* determines worker node hash table size * /* Local functions forward declarations */ static bool NodeIsPrimaryWorker(WorkerNode *node); +static bool NodeIsRemotePrimaryWorker(WorkerNode *node); static bool NodeIsReadableWorker(WorkerNode *node); @@ -168,6 +169,20 @@ ActivePrimaryNonCoordinatorNodeList(LOCKMODE lockMode) } +/* + * ActivePrimaryRemoteNonCoordinatorNodeList returns a list of all active primary worker nodes + * in workerNodeHash except the local one. lockMode specifies which lock to use on pg_dist_node; + * this is necessary when the caller doesn't want nodes to be added concurrently with their use of + * this list. This method excludes the coordinator even if it is added as a worker to the cluster. + */ +List * +ActivePrimaryRemoteNonCoordinatorNodeList(LOCKMODE lockMode) +{ + EnsureModificationsCanRun(); + return FilterActiveNodeListFunc(lockMode, NodeIsRemotePrimaryWorker); +} + + /* * ActivePrimaryNodeList returns a list of all active primary nodes in * workerNodeHash. @@ -202,6 +217,16 @@ NodeIsPrimaryWorker(WorkerNode *node) } +/* + * NodeIsRemotePrimaryWorker returns true if the node is a remote primary worker node. + */ +static bool +NodeIsRemotePrimaryWorker(WorkerNode *node) +{ + return !NodeIsCoordinator(node) && NodeIsPrimaryAndRemote(node); +} + + /* * CoordinatorAddedAsWorkerNode returns true if the coordinator is added to * pg_dist_node.
diff --git a/src/backend/distributed/operations/worker_shard_copy.c b/src/backend/distributed/operations/worker_shard_copy.c index 6c2d767416f..4e002193ab4 100644 --- a/src/backend/distributed/operations/worker_shard_copy.c +++ b/src/backend/distributed/operations/worker_shard_copy.c @@ -19,6 +19,7 @@ #include "utils/lsyscache.h" #include "distributed/commands/multi_copy.h" +#include "distributed/commands/utility_hook.h" #include "distributed/connection_management.h" #include "distributed/local_executor.h" #include "distributed/local_multi_copy.h" @@ -384,7 +385,7 @@ CopyableColumnNamesFromTupleDesc(TupleDesc tupDesc) for (int i = 0; i < tupDesc->natts; i++) { Form_pg_attribute att = TupleDescAttr(tupDesc, i); - if (att->attgenerated || att->attisdropped) + if (IsDroppedOrGenerated(att)) { continue; } diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index ed8ea3b3ac2..d80216b3682 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -109,6 +109,7 @@ static PlannedStmt * FinalizeNonRouterPlan(PlannedStmt *localPlan, static PlannedStmt * FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan); static AppendRelInfo * FindTargetAppendRelInfo(PlannerInfo *root, int relationRteIndex); static List * makeTargetListFromCustomScanList(List *custom_scan_tlist); +static void DisableTrackingQueryCountersForPlanTree(struct Plan *planTree); static List * makeCustomScanTargetlistFromExistingTargetList(List *existingTargetlist); static int32 BlessRecordExpressionList(List *exprs); static void CheckNodeIsDumpable(Node *node); @@ -1702,6 +1703,64 @@ makeTargetListFromCustomScanList(List *custom_scan_tlist) } +/* + * DisableTrackingQueryCountersForPlannedStmt takes a PlannedStmt and + * disables tracking query counters for the distributed parts of the plan. + */ +void +DisableTrackingQueryCountersForPlannedStmt(PlannedStmt *plannedStmt) +{ + DisableTrackingQueryCountersForPlanTree(plannedStmt->planTree); } + + +/* + * DisableTrackingQueryCountersForPlanTree takes a plan tree and + * disables query counter tracking for it if it's a distributed plan, + * and does so recursively for its distributed children. + * + * Note that today none of the callers provide a plan tree with subplans + * at any level, so we throw an error if we find any, rather than + * carrying an implementation we don't need. + */ +static void +DisableTrackingQueryCountersForPlanTree(struct Plan *planTree) +{ + /* we don't expect very deep plan trees but let's be on the safe side */ + CHECK_FOR_INTERRUPTS(); + check_stack_depth(); + + if (planTree == NULL) + { + return; + } + + DisableTrackingQueryCountersForPlanTree(planTree->lefttree); + DisableTrackingQueryCountersForPlanTree(planTree->righttree); + + if (!IsCitusCustomScan(planTree)) + { + return; + } + + DistributedPlan *distPlan = GetDistributedPlan((CustomScan *) planTree); + distPlan->disableTrackingQueryCounters = true; + + if (distPlan->selectPlanForModifyViaCoordinatorOrRepartition) + { + DisableTrackingQueryCountersForPlanTree(distPlan-> + selectPlanForModifyViaCoordinatorOrRepartition + ->planTree); + } + + if (list_length(distPlan->subPlanList) > 0 || + list_length(distPlan->usedSubPlanNodeList) > 0) + { + ereport(ERROR, (errmsg("unexpected subplans in distributed plan"))); + } +} + + /* * BlessRecordExpression ensures we can parse an anonymous composite type on the * target list of a query that is sent to the worker.
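As an aside for testers: the GUC descriptions updated below recommend altering the shard and placement id sequences directly on the coordinator when testing commands that create shards via a worker. A minimal sketch of that recommendation, using the sequence names mentioned in those descriptions (the restart values are made up):

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART WITH 103000;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART WITH 203000;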
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 1e19bf738e0..7066ef34fca 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -120,7 +120,7 @@ /* marks shared object as one loadable by the postgres version compiled against */ #if PG_VERSION_NUM >= PG_VERSION_18 -PG_MODULE_MAGIC_EXT(.name = "citus", .version = "14.0.0"); +PG_MODULE_MAGIC_EXT(.name = "citus", .version = "14.1.0"); #else PG_MODULE_MAGIC; #endif @@ -2240,7 +2240,20 @@ RegisterCitusConfigVariables(void) "instead be generated by incrementing from the value of " "this GUC and this will be reflected in the GUC. This is " "mainly useful to ensure consistent placement IDs when running " - "tests in parallel."), + "tests in parallel. " + "Note that the function consuming the next placement id always " + "connects to the coordinator to get the next value if the " + "session is not connected to the coordinator. So this GUC " + "is only effective on the coordinator when creating new " + "shards. " + "Also note that if the citus internal connection that " + "is used to connect to the coordinator to retrieve the next " + "placement id changes, then the value this GUC was set to " + "would be ineffective too. For this reason, when testing a " + "command that creates shards via a worker node, it's " + "recommended to directly alter " + "pg_dist_placement_placementid_seq at the system level on " + "the coordinator."), &NextPlacementId, 0, 0, INT_MAX, PGC_USERSET, @@ -2255,7 +2268,19 @@ RegisterCitusConfigVariables(void) "instead be generated by incrementing from the value of " "this GUC and this will be reflected in the GUC. This is " "mainly useful to ensure consistent shard IDs when running " - "tests in parallel."), + "tests in parallel. " + "Note that the function consuming the next shard id always " + "connects to the coordinator to get the next value if the " + "session is not connected to the coordinator. So this GUC " + "is only effective on the coordinator when creating new " + "shards. " + "Also note that if the citus internal connection that " + "is used to connect to the coordinator to retrieve the next " + "shard id changes, then the value this GUC was set to " + "would be ineffective too. 
For this reason, when testing a " + "command that creates shards via a worker node, it's " + "recommended to directly alter pg_dist_shardid_seq at the " + "system level on the coordinator."), &NextShardId, 0, 0, INT_MAX, PGC_USERSET, diff --git a/src/backend/distributed/sql/citus--13.2-1--13.3-1.sql b/src/backend/distributed/sql/citus--13.2-1--13.3-1.sql new file mode 100644 index 00000000000..68bcc997b37 --- /dev/null +++ b/src/backend/distributed/sql/citus--13.2-1--13.3-1.sql @@ -0,0 +1,11 @@ +-- citus--13.2-1--13.3-1 +-- bump version to 13.3-1 + +#include "udfs/citus_internal_get_next_colocation_id/13.3-1.sql" + +#include "udfs/citus_internal_adjust_identity_column_seq_settings/13.3-1.sql" +#include "udfs/worker_apply_sequence_command/13.3-1.sql" + +#include "udfs/citus_internal_lock_colocation_id/13.3-1.sql" + +#include "udfs/citus_internal_acquire_placement_colocation_lock/13.3-1.sql" diff --git a/src/backend/distributed/sql/citus--14.0-1--14.1-1.sql b/src/backend/distributed/sql/citus--14.0-1--14.1-1.sql new file mode 100644 index 00000000000..c90840e6aba --- /dev/null +++ b/src/backend/distributed/sql/citus--14.0-1--14.1-1.sql @@ -0,0 +1,11 @@ +-- citus--14.0-1--14.1-1 +-- bump version to 14.1-1 + +#include "udfs/citus_internal_get_next_colocation_id/14.1-1.sql" + +#include "udfs/citus_internal_adjust_identity_column_seq_settings/14.1-1.sql" +#include "udfs/worker_apply_sequence_command/14.1-1.sql" + +#include "udfs/citus_internal_lock_colocation_id/14.1-1.sql" + +#include "udfs/citus_internal_acquire_placement_colocation_lock/14.1-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--13.3-1--13.2-1.sql b/src/backend/distributed/sql/downgrades/citus--13.3-1--13.2-1.sql new file mode 100644 index 00000000000..efc997c828f --- /dev/null +++ b/src/backend/distributed/sql/downgrades/citus--13.3-1--13.2-1.sql @@ -0,0 +1,11 @@ +-- citus--13.3-1--13.2-1 +-- downgrade version to 13.2-1 + +DROP FUNCTION IF EXISTS citus_internal.get_next_colocation_id(); + +DROP FUNCTION IF EXISTS citus_internal.adjust_identity_column_seq_settings(regclass, bigint, boolean); +DROP FUNCTION IF EXISTS pg_catalog.worker_apply_sequence_command(text, regtype, bigint, boolean); + +DROP FUNCTION IF EXISTS citus_internal.lock_colocation_id(int, int); + +DROP FUNCTION IF EXISTS citus_internal.acquire_placement_colocation_lock(bigint, int); diff --git a/src/backend/distributed/sql/downgrades/citus--14.1-1--14.0-1.sql b/src/backend/distributed/sql/downgrades/citus--14.1-1--14.0-1.sql new file mode 100644 index 00000000000..aba495e796a --- /dev/null +++ b/src/backend/distributed/sql/downgrades/citus--14.1-1--14.0-1.sql @@ -0,0 +1,11 @@ +-- citus--14.1-1--14.0-1 +-- downgrade version to 14.0-1 + +DROP FUNCTION IF EXISTS citus_internal.get_next_colocation_id(); + +DROP FUNCTION IF EXISTS citus_internal.adjust_identity_column_seq_settings(regclass, bigint, boolean); +DROP FUNCTION IF EXISTS pg_catalog.worker_apply_sequence_command(text, regtype, bigint, boolean); + +DROP FUNCTION IF EXISTS citus_internal.lock_colocation_id(int, int); + +DROP FUNCTION IF EXISTS citus_internal.acquire_placement_colocation_lock(bigint, int); diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/13.1-1.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/13.1-1.sql index 16a1b0164ff..f4c500e9172 100644 --- a/src/backend/distributed/sql/udfs/citus_drop_trigger/13.1-1.sql +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/13.1-1.sql @@ -11,9 +11,9 @@ BEGIN FOR v_obj IN SELECT * FROM 
pg_event_trigger_dropped_objects() WHERE object_type IN ('table', 'foreign table') LOOP - -- first drop the table and metadata on the workers - -- then drop all the shards on the workers - -- finally remove the pg_dist_partition entry on the coordinator + -- first drop the table and metadata on the remote nodes + -- then drop all the shards on the remote nodes + -- finally remove the pg_dist_partition entry on the local node PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name); -- If both original and normal values are false, the dropped table was a partition diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql index 16a1b0164ff..f4c500e9172 100644 --- a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql @@ -11,9 +11,9 @@ BEGIN FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() WHERE object_type IN ('table', 'foreign table') LOOP - -- first drop the table and metadata on the workers - -- then drop all the shards on the workers - -- finally remove the pg_dist_partition entry on the coordinator + -- first drop the table and metadata on the remote nodes + -- then drop all the shards on the remote nodes + -- finally remove the pg_dist_partition entry on the local node PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name); -- If both original and normal values are false, the dropped table was a partition diff --git a/src/backend/distributed/sql/udfs/citus_internal_acquire_placement_colocation_lock/13.3-1.sql b/src/backend/distributed/sql/udfs/citus_internal_acquire_placement_colocation_lock/13.3-1.sql new file mode 100644 index 00000000000..64c62869d6c --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_acquire_placement_colocation_lock/13.3-1.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION citus_internal.acquire_placement_colocation_lock(lock_id bigint, lock_mode int) + RETURNS int + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_acquire_placement_colocation_lock$$; +COMMENT ON FUNCTION citus_internal.acquire_placement_colocation_lock(bigint, int) + IS 'acquire a placement colocation lock on a colocation id'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_acquire_placement_colocation_lock/14.1-1.sql b/src/backend/distributed/sql/udfs/citus_internal_acquire_placement_colocation_lock/14.1-1.sql new file mode 100644 index 00000000000..64c62869d6c --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_acquire_placement_colocation_lock/14.1-1.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION citus_internal.acquire_placement_colocation_lock(lock_id bigint, lock_mode int) + RETURNS int + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_acquire_placement_colocation_lock$$; +COMMENT ON FUNCTION citus_internal.acquire_placement_colocation_lock(bigint, int) + IS 'acquire a placement colocation lock on a colocation id'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_acquire_placement_colocation_lock/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_acquire_placement_colocation_lock/latest.sql new file mode 100644 index 00000000000..64c62869d6c --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_acquire_placement_colocation_lock/latest.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION 
citus_internal.acquire_placement_colocation_lock(lock_id bigint, lock_mode int) + RETURNS int + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_acquire_placement_colocation_lock$$; +COMMENT ON FUNCTION citus_internal.acquire_placement_colocation_lock(bigint, int) + IS 'acquire a placement colocation lock on a colocation id'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_adjust_identity_column_seq_settings/13.3-1.sql b/src/backend/distributed/sql/udfs/citus_internal_adjust_identity_column_seq_settings/13.3-1.sql new file mode 100644 index 00000000000..ab892e4c36c --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_adjust_identity_column_seq_settings/13.3-1.sql @@ -0,0 +1,8 @@ +CREATE OR REPLACE FUNCTION citus_internal.adjust_identity_column_seq_settings(sequence_id regclass, + last_value bigint, + is_called boolean) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_identity_column_seq_settings$$; +COMMENT ON FUNCTION citus_internal.adjust_identity_column_seq_settings(regclass, bigint, boolean) + IS 'modify identity column sequence settings to produce globally unique values'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_adjust_identity_column_seq_settings/14.1-1.sql b/src/backend/distributed/sql/udfs/citus_internal_adjust_identity_column_seq_settings/14.1-1.sql new file mode 100644 index 00000000000..ab892e4c36c --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_adjust_identity_column_seq_settings/14.1-1.sql @@ -0,0 +1,8 @@ +CREATE OR REPLACE FUNCTION citus_internal.adjust_identity_column_seq_settings(sequence_id regclass, + last_value bigint, + is_called boolean) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_identity_column_seq_settings$$; +COMMENT ON FUNCTION citus_internal.adjust_identity_column_seq_settings(regclass, bigint, boolean) + IS 'modify identity column sequence settings to produce globally unique values'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_adjust_identity_column_seq_settings/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_adjust_identity_column_seq_settings/latest.sql new file mode 100644 index 00000000000..ab892e4c36c --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_adjust_identity_column_seq_settings/latest.sql @@ -0,0 +1,8 @@ +CREATE OR REPLACE FUNCTION citus_internal.adjust_identity_column_seq_settings(sequence_id regclass, + last_value bigint, + is_called boolean) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_identity_column_seq_settings$$; +COMMENT ON FUNCTION citus_internal.adjust_identity_column_seq_settings(regclass, bigint, boolean) + IS 'modify identity column sequence settings to produce globally unique values'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_get_next_colocation_id/13.3-1.sql b/src/backend/distributed/sql/udfs/citus_internal_get_next_colocation_id/13.3-1.sql new file mode 100644 index 00000000000..e6df45559c3 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_get_next_colocation_id/13.3-1.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION citus_internal.get_next_colocation_id() + RETURNS bigint + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_get_next_colocation_id$$; +COMMENT ON FUNCTION citus_internal.get_next_colocation_id() + IS 'retrieves the next colocation id from pg_dist_colocationid_seq'; diff --git 
a/src/backend/distributed/sql/udfs/citus_internal_get_next_colocation_id/14.1-1.sql b/src/backend/distributed/sql/udfs/citus_internal_get_next_colocation_id/14.1-1.sql new file mode 100644 index 00000000000..e6df45559c3 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_get_next_colocation_id/14.1-1.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION citus_internal.get_next_colocation_id() + RETURNS bigint + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_get_next_colocation_id$$; +COMMENT ON FUNCTION citus_internal.get_next_colocation_id() + IS 'retrieves the next colocation id from pg_dist_colocationid_seq'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_get_next_colocation_id/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_get_next_colocation_id/latest.sql new file mode 100644 index 00000000000..e6df45559c3 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_get_next_colocation_id/latest.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION citus_internal.get_next_colocation_id() + RETURNS bigint + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_get_next_colocation_id$$; +COMMENT ON FUNCTION citus_internal.get_next_colocation_id() + IS 'retrieves the next colocation id from pg_dist_colocationid_seq'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_lock_colocation_id/13.3-1.sql b/src/backend/distributed/sql/udfs/citus_internal_lock_colocation_id/13.3-1.sql new file mode 100644 index 00000000000..0bbccf10720 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_lock_colocation_id/13.3-1.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION citus_internal.lock_colocation_id(colocation_id int, lock_mode int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_lock_colocation_id$$; +COMMENT ON FUNCTION citus_internal.lock_colocation_id(int, int) + IS 'acquire a lock on a colocation id'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_lock_colocation_id/14.1-1.sql b/src/backend/distributed/sql/udfs/citus_internal_lock_colocation_id/14.1-1.sql new file mode 100644 index 00000000000..0bbccf10720 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_lock_colocation_id/14.1-1.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION citus_internal.lock_colocation_id(colocation_id int, lock_mode int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_lock_colocation_id$$; +COMMENT ON FUNCTION citus_internal.lock_colocation_id(int, int) + IS 'acquire a lock on a colocation id'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_lock_colocation_id/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_lock_colocation_id/latest.sql new file mode 100644 index 00000000000..0bbccf10720 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_lock_colocation_id/latest.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION citus_internal.lock_colocation_id(colocation_id int, lock_mode int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_lock_colocation_id$$; +COMMENT ON FUNCTION citus_internal.lock_colocation_id(int, int) + IS 'acquire a lock on a colocation id'; diff --git a/src/backend/distributed/sql/udfs/worker_apply_sequence_command/13.3-1.sql b/src/backend/distributed/sql/udfs/worker_apply_sequence_command/13.3-1.sql new file mode 100644 index 00000000000..615feff03a6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_apply_sequence_command/13.3-1.sql @@ -0,0 +1,9 @@ +CREATE OR REPLACE 
FUNCTION pg_catalog.worker_apply_sequence_command(create_sequence_command text, + sequence_type_id regtype, + last_value bigint, + is_called boolean) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_apply_sequence_command$$; +COMMENT ON FUNCTION pg_catalog.worker_apply_sequence_command(text,regtype,bigint,boolean) + IS 'create a sequence which produces globally unique values'; diff --git a/src/backend/distributed/sql/udfs/worker_apply_sequence_command/14.1-1.sql b/src/backend/distributed/sql/udfs/worker_apply_sequence_command/14.1-1.sql new file mode 100644 index 00000000000..615feff03a6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_apply_sequence_command/14.1-1.sql @@ -0,0 +1,9 @@ +CREATE OR REPLACE FUNCTION pg_catalog.worker_apply_sequence_command(create_sequence_command text, + sequence_type_id regtype, + last_value bigint, + is_called boolean) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_apply_sequence_command$$; +COMMENT ON FUNCTION pg_catalog.worker_apply_sequence_command(text,regtype,bigint,boolean) + IS 'create a sequence which produces globally unique values'; diff --git a/src/backend/distributed/sql/udfs/worker_apply_sequence_command/latest.sql b/src/backend/distributed/sql/udfs/worker_apply_sequence_command/latest.sql new file mode 100644 index 00000000000..615feff03a6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_apply_sequence_command/latest.sql @@ -0,0 +1,9 @@ +CREATE OR REPLACE FUNCTION pg_catalog.worker_apply_sequence_command(create_sequence_command text, + sequence_type_id regtype, + last_value bigint, + is_called boolean) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_apply_sequence_command$$; +COMMENT ON FUNCTION pg_catalog.worker_apply_sequence_command(text,regtype,bigint,boolean) + IS 'create a sequence which produces globally unique values'; diff --git a/src/backend/distributed/test/run_from_same_connection.c b/src/backend/distributed/test/run_from_same_connection.c index e7fe4bf3007..64ae18c790d 100644 --- a/src/backend/distributed/test/run_from_same_connection.c +++ b/src/backend/distributed/test/run_from_same_connection.c @@ -29,6 +29,7 @@ #include "distributed/intermediate_result_pruning.h" #include "distributed/lock_graph.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" #include "distributed/remote_commands.h" #include "distributed/run_from_same_connection.h" #include "distributed/version_compat.h" @@ -164,6 +165,27 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS) "start_session_level_connection_to_node must be called first to open a session level connection"); } + /* + * The connection saved in start_session_level_connection_to_node() might + * already have been used as a citus internal connection to connect to + * another node, either before or after it was saved as a session level + * connection. When sending commands over a citus internal connection, we + * sometimes disable citus.enable_ddl_propagation and don't bother enabling + * it back, given that a citus internal connection should never care what + * citus.enable_ddl_propagation is set to. In such cases it stays disabled, + * and unless we disabled it at the system level etc., that is not something + * we want when executing a command that's not for Citus internal purposes + * via this UDF. For this reason, we first reset it here.
+ * + * Ideally, maybe we should ensure that all the code-paths that disable + * citus.enable_ddl_propagation for a citus internal connection re-enable + * it, but it's not really something that such a connection should ever + * care about. Plus, this only affects isolation tests, so we choose + * to reset it here instead. + */ + ExecuteCriticalRemoteCommand(singleConnection, "RESET citus.enable_ddl_propagation"); + appendStringInfo(processStringInfo, ALTER_CURRENT_PROCESS_ID, MyProcPid); appendStringInfo(workerProcessStringInfo, ALTER_CURRENT_WORKER_PROCESS_ID, GetRemoteProcessId()); diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 7da7210b411..7e929847b63 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -167,6 +167,48 @@ SendCommandToRemoteNodesWithMetadata(const char *command) } +/* + * SendCommandToRemoteWorkersWithMetadata sends a command to remote workers in + * parallel. Commands are committed on the nodes when the local transaction + * commits. + */ +void +SendCommandToRemoteWorkersWithMetadata(const char *command) +{ + SendCommandToRemoteWorkersWithMetadataParams(command, CurrentUserName(), + 0, NULL, NULL); +} + + +/* + * SendCommandToCoordinatorViaSuperUser sends a command to the coordinator by + * opening a superuser connection. The command is committed on the coordinator + * when the local transaction commits. The connection is made as the extension + * owner to ensure write access to the Citus metadata tables. + * + * Since we otherwise prevent opening superuser connections for metadata tables, + * using this function is discouraged. Consider using it only for locking + * metadata tables on the coordinator before creating distributed tables or + * before propagating pg_dist_object tuples for dependent objects. + */ +void +SendCommandToCoordinatorViaSuperUser(const char *command) +{ + SendCommandToCoordinatorParams(command, CitusExtensionOwnerName(), 0, NULL, NULL); +} + + +/* + * SendCommandToCoordinator sends a command to the coordinator. + * The command is committed on the coordinator when the local transaction commits. + */ +void +SendCommandToCoordinator(const char *command) +{ + SendCommandToCoordinatorParams(command, CurrentUserName(), 0, NULL, NULL); +} + + /* * SendCommandToRemoteNodesWithMetadataViaSuperUser sends a command to remote * nodes in parallel by opening a super user connection. Commands are committed @@ -222,6 +264,50 @@ SendCommandToRemoteMetadataNodesParams(const char *command, } +/* + * SendCommandToRemoteWorkersWithMetadataParams is a wrapper around + * SendCommandToWorkersParamsInternal() that can be used to send commands + * to remote metadata workers. + */ +void +SendCommandToRemoteWorkersWithMetadataParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues) +{ + /* use METADATA_NODES so that ErrorIfAnyMetadataNodeOutOfSync checks local node as well */ + List *workerNodeList = TargetWorkerSetNodeList(METADATA_NODES, + RowShareLock); + + ErrorIfAnyMetadataNodeOutOfSync(workerNodeList); + + SendCommandToWorkersParamsInternal(REMOTE_NON_COORDINATOR_METADATA_NODES, command, + user, + parameterCount, parameterTypes, parameterValues); +} + + +/* + * SendCommandToCoordinatorParams is a wrapper around SendCommandToWorkersParamsInternal() + * that can be used to send commands to the coordinator.
+ */ +void +SendCommandToCoordinatorParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues) +{ + /* use METADATA_NODES so that ErrorIfAnyMetadataNodeOutOfSync checks local node as well */ + List *workerNodeList = TargetWorkerSetNodeList(METADATA_NODES, + RowShareLock); + + ErrorIfAnyMetadataNodeOutOfSync(workerNodeList); + + SendCommandToWorkersParamsInternal(ONLY_COORDINATOR_NODE, command, user, + parameterCount, parameterTypes, parameterValues); +} + + /* * TargetWorkerSetNodeList returns a list of WorkerNode's that satisfies the * TargetWorkerSet. @@ -244,6 +330,16 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) { workerNodeList = ActivePrimaryNonCoordinatorNodeList(lockMode); } + else if (targetWorkerSet == REMOTE_NON_COORDINATOR_METADATA_NODES) + { + workerNodeList = ActivePrimaryRemoteNonCoordinatorNodeList(lockMode); + } + else if (targetWorkerSet == ONLY_COORDINATOR_NODE) + { + /* call this first like other functions returning a node list */ + EnsureModificationsCanRun(); + workerNodeList = list_make1(CoordinatorNodeIfAddedAsWorkerOrError()); + } else { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -257,7 +353,8 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) { if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || targetWorkerSet == REMOTE_METADATA_NODES || - targetWorkerSet == METADATA_NODES) && + targetWorkerSet == METADATA_NODES || + targetWorkerSet == REMOTE_NON_COORDINATOR_METADATA_NODES) && !workerNode->hasMetadata) { continue; diff --git a/src/backend/distributed/utils/citus_copyfuncs.c b/src/backend/distributed/utils/citus_copyfuncs.c index 75dbae3fcaf..74496151c0f 100644 --- a/src/backend/distributed/utils/citus_copyfuncs.c +++ b/src/backend/distributed/utils/citus_copyfuncs.c @@ -139,6 +139,7 @@ CopyNodeDistributedPlan(COPYFUNC_ARGS) COPY_NODE_FIELD(planningError); COPY_SCALAR_FIELD(sourceResultRepartitionColumnIndex); + COPY_SCALAR_FIELD(disableTrackingQueryCounters); } diff --git a/src/backend/distributed/utils/citus_outfuncs.c b/src/backend/distributed/utils/citus_outfuncs.c index edf4731a6ce..4782cab5de4 100644 --- a/src/backend/distributed/utils/citus_outfuncs.c +++ b/src/backend/distributed/utils/citus_outfuncs.c @@ -204,6 +204,7 @@ OutDistributedPlan(OUTFUNC_ARGS) WRITE_NODE_FIELD(planningError); WRITE_INT_FIELD(sourceResultRepartitionColumnIndex); + WRITE_BOOL_FIELD(disableTrackingQueryCounters); } diff --git a/src/backend/distributed/utils/colocation_utils.c b/src/backend/distributed/utils/colocation_utils.c index 800ef08b2d9..271a7617fc7 100644 --- a/src/backend/distributed/utils/colocation_utils.c +++ b/src/backend/distributed/utils/colocation_utils.c @@ -30,12 +30,14 @@ #include "distributed/commands.h" #include "distributed/coordinator_protocol.h" #include "distributed/listutils.h" +#include "distributed/lock_graph.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/metadata_utility.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/pg_dist_colocation.h" +#include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/tenant_schema_metadata.h" @@ -55,6 +57,9 @@ static int CompareShardPlacementsByNode(const void *leftElement, const void *rightElement); static uint32 
CreateColocationGroupForRelation(Oid sourceRelationId);
 static void BreakColocation(Oid sourceRelationId);
+static uint32 GetNextColocationId(void);
+static int64 GetNextColocationIdFromNode(WorkerNode *node);
+static uint32 GetNextColocationIdInternal(void);
 static uint32 SingleShardTableGetNodeId(Oid relationId);
@@ -62,6 +67,7 @@ static uint32 SingleShardTableGetNodeId(Oid relationId);
 PG_FUNCTION_INFO_V1(mark_tables_colocated);
 PG_FUNCTION_INFO_V1(get_colocated_shard_array);
 PG_FUNCTION_INFO_V1(update_distributed_table_colocation);
+PG_FUNCTION_INFO_V1(citus_internal_get_next_colocation_id);
 
 
 /*
@@ -643,7 +649,111 @@ InsertColocationGroupLocally(uint32 colocationId, int shardCount, int replicatio
 
 
 /*
- * GetNextColocationId allocates and returns a unique colocationId for the
+ * GetNextColocationId retrieves the next colocation id from the local node
+ * if it's the coordinator, or from the coordinator otherwise.
+ *
+ * In the latter case, throws an error if the coordinator is not in the
+ * metadata.
+ */
+static uint32
+GetNextColocationId(void)
+{
+	uint32 colocationId = INVALID_COLOCATION_ID;
+	if (IsCoordinator())
+	{
+		colocationId = GetNextColocationIdInternal();
+	}
+	else
+	{
+		/*
+		 * If we're not on the coordinator, retrieve the next id from the
+		 * coordinator node. Although all nodes have the sequence, we don't
+		 * synchronize the sequences that are part of the Citus metadata
+		 * across nodes, so we need to get the next value from the
+		 * coordinator.
+		 *
+		 * Note that before this point, we should have already verified
+		 * that the coordinator has been added to the metadata.
+		 */
+		WorkerNode *coordinator = CoordinatorNodeIfAddedAsWorkerOrError();
+		colocationId = GetNextColocationIdFromNode(coordinator);
+	}
+
+	return colocationId;
+}
+
+
+/*
+ * GetNextColocationIdFromNode gets the next colocation id from the given
+ * node by calling the citus_internal.get_next_colocation_id() function.
+ */
+static int64
+GetNextColocationIdFromNode(WorkerNode *node)
+{
+	const char *nodeName = node->workerName;
+	int nodePort = node->workerPort;
+	uint32 connectionFlags = 0;
+	MultiConnection *connection = GetNodeConnection(connectionFlags, nodeName, nodePort);
+
+	int querySent = SendRemoteCommand(connection,
+									  "SELECT citus_internal.get_next_colocation_id();");
+	if (querySent == 0)
+	{
+		ReportConnectionError(connection, ERROR);
+	}
+
+	bool raiseInterrupts = true;
+	PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
+	if (!IsResponseOK(result))
+	{
+		ReportResultError(connection, result, ERROR);
+	}
+
+	int64 rowCount = PQntuples(result);
+	int64 colCount = PQnfields(result);
+	if (rowCount != 1 || colCount != 1)
+	{
+		ereport(ERROR, (errmsg("unexpected result from the node when getting "
+							   "next colocation id")));
+	}
+
+	int64 colocationId = ParseIntField(result, 0, 0);
+
+	PQclear(result);
+	ForgetResults(connection);
+
+	return colocationId;
+}
+
+
+/*
+ * citus_internal_get_next_colocation_id is a wrapper around
+ * GetNextColocationIdInternal().
+ *
+ * NB: This can be called by any user; for now we have decided that that's
+ * ok. We might want to restrict this to users that are part of a specific
+ * role or such at some later point.
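
The net effect is that callers no longer need to care which node they run on; a hypothetical call site, with the error handling being an assumption of this sketch:

```c
/*
 * Hypothetical call site: allocate a cluster-unique colocation id regardless
 * of which node initiated the operation. Only GetNextColocationId() comes
 * from this patch; the check shown is an assumption.
 */
uint32 colocationId = GetNextColocationId();
if (colocationId == INVALID_COLOCATION_ID)
{
	ereport(ERROR, (errmsg("could not allocate a colocation id")));
}
```
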
+ */
+Datum
+citus_internal_get_next_colocation_id(PG_FUNCTION_ARGS)
+{
+	CheckCitusVersion(ERROR);
+	EnsureCoordinator();
+
+	if (!IsCitusInternalBackend())
+	{
+		ereport(ERROR, (errmsg("This UDF can only be called by citus "
+							   "internal backends")));
+	}
+
+	uint32 colocationId = GetNextColocationIdInternal();
+	Datum colocationIdDatum = Int64GetDatum(colocationId);
+
+	PG_RETURN_DATUM(colocationIdDatum);
+}
+
+
+/*
+ * GetNextColocationIdInternal allocates and returns a unique colocationId for the
  * colocation group to be created. This allocation occurs both in shared memory
  * and in write ahead logs; writing to logs avoids the risk of having
  * colocationId collisions.
@@ -652,8 +762,8 @@ InsertColocationGroupLocally(uint32 colocationId, int shardCount, int replicatio
  * with the master node. Further note that this function relies on an internal
  * sequence created in initdb to generate unique identifiers.
  */
-uint32
-GetNextColocationId()
+static uint32
+GetNextColocationIdInternal(void)
 {
 	text *sequenceName = cstring_to_text(COLOCATIONID_SEQUENCE_NAME);
 	Oid sequenceId = ResolveRelationId(sequenceName, false);
@@ -1226,6 +1336,17 @@ SingleShardTableColocationNodeId(uint32 colocationId)
 	List *tablesInColocationGroup = ColocationGroupTableList(colocationId, 0);
 	if (list_length(tablesInColocationGroup) == 0)
 	{
+		/*
+		 * If we're on a worker, first acquire the lock on the coordinator via
+		 * the remote metadata connection to the coordinator as superuser. Fwiw,
+		 * we'll acquire the lock on the local node as well via
+		 * DistributedTablePlacementNodeList().
+		 */
+		if (!IsCoordinator())
+		{
+			LockPgDistNodeOnCoordinatorViaSuperUser(RowShareLock);
+		}
+
 		int workerNodeIndex = EmptySingleShardTableColocationDecideNodeId(
 			colocationId);
 		List *workerNodeList = DistributedTablePlacementNodeList(RowShareLock);
diff --git a/src/backend/distributed/utils/reference_table_utils.c b/src/backend/distributed/utils/reference_table_utils.c
index 5bdb4fd758b..361c5cbe5df 100644
--- a/src/backend/distributed/utils/reference_table_utils.c
+++ b/src/backend/distributed/utils/reference_table_utils.c
@@ -71,6 +71,9 @@ PG_FUNCTION_INFO_V1(replicate_reference_tables);
 Datum
 replicate_reference_tables(PG_FUNCTION_ARGS)
 {
+	CheckCitusVersion(ERROR);
+	EnsureCoordinator();
+
 	Oid shardReplicationModeOid = PG_GETARG_OID(0);
 	char shardReplicationMode = LookupShardTransferMode(shardReplicationModeOid);
@@ -109,6 +112,23 @@ EnsureReferenceTablesExistOnAllNodes(void)
  *
  * The transferMode is passed on to the implementation of the copy to control the locks
  * and transferMode.
+ *
+ * When called from the coordinator, this function acquires and releases the
+ * colocation id locks locally as needed. When called from a worker, however,
+ * the colocation locks still need to be acquired on the coordinator, over a
+ * remote connection, which means acquiring and releasing them via separate
+ * commands. Since a transactional advisory lock cannot be released by a
+ * different command even within the same transaction (*), we instead commit
+ * the remote transaction that acquired those locks on the coordinator in
+ * order to release them. To make this possible, we use a connection that is
+ * exclusively dedicated to dealing with those locks and that initially stays
+ * outside of any transaction, so we can send BEGIN / COMMIT over it in the
+ * middle of the local transaction.
+ * + * (*): This is because the resource owner changes between commands even within the same + * transaction, so today it's not possible to release a transactional advisory lock + * in a different command. This is also the reason why Postgres provides + * pg_advisory_xact_lock() but doesn't provide a UDF to release a transactional + * advisory lock. */ void EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) @@ -125,6 +145,38 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) return; } + /* + * We always want to acquire colocation id locks on the coordinator, so we + * need to open a connection to the coordinator when we're on a worker node. + */ + MultiConnection *coordinatorLockConn = NULL; + if (!IsCoordinator()) + { + WorkerNode *coordinator = CoordinatorNodeIfAddedAsWorkerOrError(); + char *coordinatorHostname = coordinator->workerName; + uint32 coordinatorPort = coordinator->workerPort; + + coordinatorLockConn = GetNodeUserDatabaseConnection( + OUTSIDE_TRANSACTION, coordinatorHostname, coordinatorPort, + CurrentUserName(), NULL); + if (PQstatus(coordinatorLockConn->pgConn) != CONNECTION_OK) + { + ReportConnectionError(coordinatorLockConn, WARNING); + ereport(ERROR, (errmsg("could not open a connection to coordinator " + "while checking if reference tables are " + "replicated to all nodes, see the earlier " + "warnings"), + errdetail("Checking reference table replication " + "requires connectivity to coordinator."))); + } + + UseCoordinatedTransaction(); + + ClaimConnectionExclusively(coordinatorLockConn); + + RemoteTransactionBegin(coordinatorLockConn); + } + /* * Most of the time this function should result in a conclusion where we do not need * to copy any reference tables. To prevent excessive locking the majority of the time @@ -139,7 +191,7 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) * DROP TABLE and create_reference_table calls so that the list of reference tables we * operate on are stable. * - * Since the changes to the reference table placements are made via loopback + * Since the changes to the reference table placements are made via loopback or remote * connections we release the locks held at the end of this function. Due to Citus * only running transactions in READ COMMITTED mode we can be sure that other * transactions correctly find the metadata entries. @@ -147,20 +199,39 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) LOCKMODE lockmodes[] = { AccessShareLock, ExclusiveLock }; for (int lockmodeIndex = 0; lockmodeIndex < lengthof(lockmodes); lockmodeIndex++) { - LockColocationId(colocationId, lockmodes[lockmodeIndex]); + if (!coordinatorLockConn) + { + LockColocationId(colocationId, + lockmodes[lockmodeIndex]); + } + else + { + char *command = + LockColocationIdCommand(colocationId, + lockmodes[lockmodeIndex]); + ExecuteCriticalRemoteCommand(coordinatorLockConn, command); + } referenceTableIdList = CitusTableTypeIdList(REFERENCE_TABLE); if (referenceTableIdList == NIL) { - /* - * No reference tables exist, make sure that any locks obtained earlier are - * released. It will probably not matter, but we release the locks in the - * reverse order we obtained them in. 
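
Condensed, the connection dance above follows this shape; every function shown is used in this hunk, while the error handling and the coordinator host/port lookup are omitted for brevity:

```c
/*
 * Condensed sketch of the remote-lock pattern above: the connection starts
 * outside any transaction, is claimed exclusively, and the advisory locks it
 * takes are later released by committing its remote transaction.
 */
MultiConnection *lockConn =
	GetNodeUserDatabaseConnection(OUTSIDE_TRANSACTION, coordinatorHostname,
								  coordinatorPort, CurrentUserName(), NULL);
ClaimConnectionExclusively(lockConn);   /* keep it out of coordinated xacts */
RemoteTransactionBegin(lockConn);       /* explicit remote BEGIN */

ExecuteCriticalRemoteCommand(lockConn,
							 LockColocationIdCommand(colocationId,
													 ExclusiveLock));

/* ... replicate the reference tables ... */

RemoteTransactionCommit(lockConn);      /* committing releases the xact locks */
CloseConnection(lockConn);
```
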
- */ - for (int releaseLockmodeIndex = lockmodeIndex; releaseLockmodeIndex >= 0; - releaseLockmodeIndex--) + if (coordinatorLockConn) { - UnlockColocationId(colocationId, lockmodes[releaseLockmodeIndex]); + RemoteTransactionCommit(coordinatorLockConn); + CloseConnection(coordinatorLockConn); + } + else + { + /* + * No reference tables exist, make sure that any locks obtained earlier are + * released. It will probably not matter, but we release the locks in the + * reverse order we obtained them in. + */ + for (int releaseLockmodeIndex = lockmodeIndex; releaseLockmodeIndex >= 0; + releaseLockmodeIndex--) + { + UnlockColocationId(colocationId, lockmodes[releaseLockmodeIndex]); + } } return; } @@ -186,15 +257,23 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) newWorkersList = WorkersWithoutReferenceTablePlacement(shardId, AccessShareLock); if (list_length(newWorkersList) == 0) { - /* - * All workers already have a copy of the reference tables, make sure that - * any locks obtained earlier are released. It will probably not matter, but - * we release the locks in the reverse order we obtained them in. - */ - for (int releaseLockmodeIndex = lockmodeIndex; releaseLockmodeIndex >= 0; - releaseLockmodeIndex--) + if (coordinatorLockConn) { - UnlockColocationId(colocationId, lockmodes[releaseLockmodeIndex]); + RemoteTransactionCommit(coordinatorLockConn); + CloseConnection(coordinatorLockConn); + } + else + { + /* + * All workers already have a copy of the reference tables, make sure that + * any locks obtained earlier are released. It will probably not matter, but + * we release the locks in the reverse order we obtained them in. + */ + for (int releaseLockmodeIndex = lockmodeIndex; releaseLockmodeIndex >= 0; + releaseLockmodeIndex--) + { + UnlockColocationId(colocationId, lockmodes[releaseLockmodeIndex]); + } } return; } @@ -202,7 +281,7 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) /* * citus_copy_shard_placement triggers metadata sync-up, which tries to - * acquire a ShareLock on pg_dist_node. We do master_copy_shad_placement + * acquire a ShareLock on pg_dist_node. We do citus_copy_shard_placement * in a separate connection. If we have modified pg_dist_node in the * current backend, this will cause a deadlock. */ @@ -249,8 +328,16 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) const char *userName = CitusExtensionOwnerName(); int connectionFlags = OUTSIDE_TRANSACTION; + char *coordinatorHostname = LocalHostName; + int32 coordinatorPort = PostPortNumber; + if (!IsCoordinator()) + { + WorkerNode *coordinator = CoordinatorNodeIfAddedAsWorkerOrError(); + coordinatorHostname = coordinator->workerName; + coordinatorPort = coordinator->workerPort; + } MultiConnection *connection = GetNodeUserDatabaseConnection( - connectionFlags, LocalHostName, PostPortNumber, + connectionFlags, coordinatorHostname, coordinatorPort, userName, NULL); if (PQstatus(connection->pgConn) == CONNECTION_OK) @@ -287,17 +374,25 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode) CloseConnection(connection); } - /* - * Since reference tables have been copied via a loopback connection we do not have - * to retain our locks. Since Citus only runs well in READ COMMITTED mode we can be - * sure that other transactions will find the reference tables copied. - * We have obtained and held multiple locks, here we unlock them all in the reverse - * order we have obtained them in. 
- */ - for (int releaseLockmodeIndex = lengthof(lockmodes) - 1; releaseLockmodeIndex >= 0; - releaseLockmodeIndex--) + if (coordinatorLockConn) { - UnlockColocationId(colocationId, lockmodes[releaseLockmodeIndex]); + RemoteTransactionCommit(coordinatorLockConn); + CloseConnection(coordinatorLockConn); + } + else + { + /* + * Since reference tables have been copied via a loopback or remote connection we do not have + * to retain our locks. Since Citus only runs well in READ COMMITTED mode we can be + * sure that other transactions will find the reference tables copied. + * We have obtained and held multiple locks, here we unlock them all in the reverse + * order we have obtained them in. + */ + for (int releaseLockmodeIndex = lengthof(lockmodes) - 1; + releaseLockmodeIndex >= 0; releaseLockmodeIndex--) + { + UnlockColocationId(colocationId, lockmodes[releaseLockmodeIndex]); + } } } @@ -365,6 +460,8 @@ ScheduleTasksToParallelCopyReferenceTablesOnAllMissingNodes(int64 jobId, char tr LOCKMODE lockmodes[] = { AccessShareLock, ExclusiveLock }; for (int lockmodeIndex = 0; lockmodeIndex < lengthof(lockmodes); lockmodeIndex++) { + /* acquiring the lock locally should be okay as we're on the coordinator */ + Assert(IsCoordinator()); LockColocationId(colocationId, lockmodes[lockmodeIndex]); referenceTableIdList = CitusTableTypeIdList(REFERENCE_TABLE); @@ -420,7 +517,7 @@ ScheduleTasksToParallelCopyReferenceTablesOnAllMissingNodes(int64 jobId, char tr /* * citus_copy_shard_placement triggers metadata sync-up, which tries to - * acquire a ShareLock on pg_dist_node. We do master_copy_shad_placement + * acquire a ShareLock on pg_dist_node. We do citus_copy_shard_placement * in a separate connection. If we have modified pg_dist_node in the * current backend, this will cause a deadlock. */ @@ -702,6 +799,9 @@ HasNodesWithMissingReferenceTables(List **referenceTableList) /* we have no reference table yet. */ return false; } + + /* acquiring the lock locally should be okay as we're on the coordinator */ + Assert(IsCoordinator()); LockColocationId(colocationId, AccessShareLock); List *referenceTableIdList = CitusTableTypeIdList(REFERENCE_TABLE); @@ -766,6 +866,9 @@ AnyRelationsModifiedInTransaction(List *relationIdList) * WorkersWithoutReferenceTablePlacement returns a list of workers (WorkerNode) that * do not yet have a placement for the given reference table shard ID, but are * supposed to. + * + * Note that this acquires a lock on pg_dist_node with provided lock mode on the + * coordinator as well, and doesn't release it until the end of the transaction. */ static List * WorkersWithoutReferenceTablePlacement(uint64 shardId, LOCKMODE lockMode) @@ -774,6 +877,17 @@ WorkersWithoutReferenceTablePlacement(uint64 shardId, LOCKMODE lockMode) List *shardPlacementList = ActiveShardPlacementList(shardId); + /* + * If we're on a worker, first acquire the lock on the coordinator via the + * remote metadata connection to the coordinator as superuser. Fwiw, we'll + * acquire the lock on the local node as well via + * ReferenceTablePlacementNodeList(). 
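
LockPgDistNodeOnCoordinatorViaSuperUser() is only declared in this diff (see worker_manager.h below); to make the comment above concrete, here is one plausible shape for it, under the assumption that it reuses SendCommandToCoordinatorViaSuperUser(). The actual definition lies outside this excerpt.

```c
/*
 * Plausible sketch only, not the patch's definition: the lock-mode-to-SQL
 * mapping shown here is an assumption.
 */
void
LockPgDistNodeOnCoordinatorViaSuperUser(LOCKMODE lockMode)
{
	const char *lockModeText =
		(lockMode == RowShareLock) ? "ROW SHARE" : "ROW EXCLUSIVE";

	StringInfo lockCommand = makeStringInfo();
	appendStringInfo(lockCommand,
					 "LOCK TABLE pg_catalog.pg_dist_node IN %s MODE;",
					 lockModeText);

	/* held on the coordinator until the distributed transaction commits */
	SendCommandToCoordinatorViaSuperUser(lockCommand->data);
}
```
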
+ */ + if (!IsCoordinator()) + { + LockPgDistNodeOnCoordinatorViaSuperUser(lockMode); + } + List *workerNodeList = ReferenceTablePlacementNodeList(lockMode); workerNodeList = SortList(workerNodeList, CompareWorkerNodes); diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index 9edfc4943a9..9ffe70bd429 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -26,6 +26,7 @@ #include "utils/lsyscache.h" #include "utils/varlena.h" +#include "distributed/argutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/coordinator_protocol.h" @@ -90,7 +91,6 @@ typedef struct LockRelationRecord /* local function forward declarations */ -static LOCKMODE IntToLockMode(int mode); static void LockReferencedReferenceShardResources(uint64 shardId, LOCKMODE lockMode); static bool AnyTableReplicated(List *shardIntervalList, List **replicatedShardIntervalList); @@ -108,6 +108,7 @@ static void SetLocktagForShardDistributionMetadata(int64 shardId, LOCKTAG *tag); PG_FUNCTION_INFO_V1(lock_shard_metadata); PG_FUNCTION_INFO_V1(lock_shard_resources); PG_FUNCTION_INFO_V1(lock_relation_if_exists); +PG_FUNCTION_INFO_V1(citus_internal_lock_colocation_id); /* Config variable managed via guc.c */ bool EnableAcquiringUnsafeLockFromWorkers = false; @@ -266,6 +267,26 @@ lock_shard_resources(PG_FUNCTION_ARGS) } +/* + * citus_internal_lock_colocation_id calls LockColocationId. + */ +Datum +citus_internal_lock_colocation_id(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + PG_ENSURE_ARGNOTNULL(0, "colocation_id"); + int32 colocationId = PG_GETARG_INT32(0); + + PG_ENSURE_ARGNOTNULL(1, "lock_mode"); + LOCKMODE lockMode = IntToLockMode(PG_GETARG_INT32(1)); + + LockColocationId(colocationId, lockMode); + + PG_RETURN_VOID(); +} + + /* * LockShardListResourcesOnFirstWorker acquires the resource locks for the specified * shards on the first worker. Acquiring a lock with or without metadata does not @@ -423,7 +444,7 @@ LockShardListMetadataOnWorkers(LOCKMODE lockmode, List *shardIntervalList) * IntToLockMode verifies whether the specified integer is an accepted lock mode * and returns it as a LOCKMODE enum. */ -static LOCKMODE +LOCKMODE IntToLockMode(int mode) { if (mode == ExclusiveLock) @@ -479,6 +500,19 @@ UnlockColocationId(int colocationId, LOCKMODE lockMode) } +/* + * LockColocationIdCommand returns a command to acquire a co-location id lock. + */ +char * +LockColocationIdCommand(int colocationId, LOCKMODE lockMode) +{ + StringInfo lockCommand = makeStringInfo(); + appendStringInfo(lockCommand, "SELECT citus_internal.lock_colocation_id(%d, %d)", + colocationId, lockMode); + return lockCommand->data; +} + + /* * LockShardDistributionMetadata returns after grabbing a lock for distribution * metadata related to the specified shard, blocking if required. 
Any locks
diff --git a/src/backend/distributed/worker/worker_data_fetch_protocol.c b/src/backend/distributed/worker/worker_data_fetch_protocol.c
index 0370001eec2..dade1857cff 100644
--- a/src/backend/distributed/worker/worker_data_fetch_protocol.c
+++ b/src/backend/distributed/worker/worker_data_fetch_protocol.c
@@ -49,6 +49,7 @@
 #include "distributed/deparser.h"
 #include "distributed/intermediate_results.h"
 #include "distributed/listutils.h"
+#include "distributed/metadata/dependency.h"
 #include "distributed/metadata_cache.h"
 #include "distributed/metadata_sync.h"
 #include "distributed/multi_logical_optimizer.h"
@@ -64,8 +65,6 @@
 
 /* Local functions forward declarations */
 static bool check_log_statement(List *stmt_list);
-static void AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName,
-								Oid sequenceTypeId);
 
 
 /* exports for SQL callable functions */
@@ -73,6 +72,7 @@ PG_FUNCTION_INFO_V1(worker_apply_shard_ddl_command);
 PG_FUNCTION_INFO_V1(worker_apply_inter_shard_ddl_command);
 PG_FUNCTION_INFO_V1(worker_apply_sequence_command);
 PG_FUNCTION_INFO_V1(worker_adjust_identity_column_seq_ranges);
+PG_FUNCTION_INFO_V1(citus_internal_adjust_identity_column_seq_settings);
 PG_FUNCTION_INFO_V1(worker_append_table_to_shard);
 PG_FUNCTION_INFO_V1(worker_nextval);
@@ -137,10 +137,13 @@ worker_apply_inter_shard_ddl_command(PG_FUNCTION_ARGS)
 
 
 /*
- * worker_adjust_identity_column_seq_ranges takes a table oid, runs an ALTER SEQUENCE statement
- * for each identity column to adjust the minvalue and maxvalue of the sequence owned by
- * identity column such that the sequence creates globally unique values.
- * We use table oid instead of sequence name to avoid any potential conflicts between sequences of different tables. This way, we can safely iterate through identity columns on a specific table without any issues. While this may introduce a small amount of business logic to workers, it's a much safer approach overall.
+ * worker_adjust_identity_column_seq_ranges implements the legacy
+ * worker_adjust_identity_column_seq_ranges() UDF. It is kept for backward
+ * compatibility when an operation is initiated from a node that is not yet
+ * upgraded and still assumes the presence of this UDF version on remote nodes,
+ * calling it on nodes that have already been upgraded to a newer Citus version.
+ * Keeping this implementation allows upgraded nodes to continue responding to
+ * such calls.
  */
 Datum
 worker_adjust_identity_column_seq_ranges(PG_FUNCTION_ARGS)
@@ -191,6 +194,260 @@ worker_adjust_identity_column_seq_ranges(PG_FUNCTION_ARGS)
 }
 
 
+/*
+ * citus_internal_adjust_identity_column_seq_settings takes a sequence oid;
+ * if run on a worker node, it executes an ALTER SEQUENCE statement to adjust
+ * the minvalue and maxvalue of that sequence so that it creates globally
+ * unique values. When called on the coordinator, the function instead sets
+ * the sequence's last value to the given last value.
+ */
+Datum
+citus_internal_adjust_identity_column_seq_settings(PG_FUNCTION_ARGS)
+{
+	CheckCitusVersion(ERROR);
+
+	Oid sequenceId = PG_GETARG_OID(0);
+	int64 lastValue = PG_GETARG_INT64(1);
+	bool isCalled = PG_GETARG_BOOL(2);
+
+	EnsureTableOwner(sequenceId);
+
+	/*
+	 * While altering a sequence, avoid propagating the DDL to other nodes
+	 * if it's already marked as distributed.
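
A sketch of how the initiating node might invoke this UDF on the metadata nodes, using the WORKER_ADJUST_IDENTITY_COLUMN_SEQ_SETTINGS macro added to metadata_sync.h later in this diff; the argument values and their formatting are assumptions of the sketch:

```c
/*
 * Assumed call pattern: push the local sequence state to all metadata nodes.
 * sequenceIdText, lastValue and isCalled are placeholders the real caller
 * would derive from the local sequence.
 */
StringInfo command = makeStringInfo();
appendStringInfo(command, WORKER_ADJUST_IDENTITY_COLUMN_SEQ_SETTINGS,
				 sequenceIdText,               /* %s: the sequence argument */
				 lastValue,                    /* INT64_FORMAT: last_value */
				 isCalled ? "true" : "false"); /* %s: is_called */
SendCommandToRemoteNodesWithMetadata(command->data);
```
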
+ */ + bool oldEnableDDLPropagation = EnableDDLPropagation; + SetLocalEnableDDLPropagation(false); + + Oid sequenceSchemaOid = get_rel_namespace(sequenceId); + char *sequenceSchemaName = get_namespace_name(sequenceSchemaOid); + char *sequenceName = get_rel_name(sequenceId); + Oid sequenceTypeId = pg_get_sequencedef(sequenceId)->seqtypid; + + if (IsCoordinator()) + { + DirectFunctionCall3(setval3_oid, + ObjectIdGetDatum(sequenceId), + Int64GetDatum(lastValue), + BoolGetDatum(isCalled)); + } + else + { + AlterSequenceMinMax(sequenceId, sequenceSchemaName, sequenceName, + sequenceTypeId); + } + + SetLocalEnableDDLPropagation(oldEnableDDLPropagation); + + PG_RETURN_VOID(); +} + + +/* + * AdjustDependentSeqRangesOnLocalWorker takes a table oid, finds all sequences + * the table depends on, and runs AlterSequenceMinMax() for each. + * + * Note that this doesn't adjust sequence ranges for identity columns by design, + * see comments written for the call made to GetAllDependenciesForObject() in + * the function body for more details. + * + * The function assumes that it's run on a worker node. + */ +void +AdjustDependentSeqRangesOnLocalWorker(Oid relationId) +{ + Assert(!IsCoordinator()); + + /* + * While altering a sequence, avoid propagating the DDL to other nodes + * if it's already marked as distributed. + */ + bool oldEnableDDLPropagation = EnableDDLPropagation; + SetLocalEnableDDLPropagation(false); + + ObjectAddress address = { 0 }; + ObjectAddressSubSet(address, RelationRelationId, relationId, 0); + + /* + * We use GetAllDependenciesForObject() instead of GetDependenciesForObject() + * because we want to collect the sequences even if they're already marked + * as distributed. This is because, today this function is called after most + * of the work to distribute a table is done. + * + * Also note that as GetAllDependenciesForObject() uses ExpandCitusSupportedTypes(), + * while it can capture the sequences used by serial columns, it explicitly + * discards sequences used by identity columns. + */ + List *dependencies = GetAllDependenciesForObject(&address); + ObjectAddress *dependency = NULL; + foreach_declared_ptr(dependency, dependencies) + { + if (getObjectClass(dependency) != OCLASS_CLASS || + get_rel_relkind(dependency->objectId) != RELKIND_SEQUENCE) + { + continue; + } + + Oid sequenceOid = dependency->objectId; + Oid sequenceSchemaOid = get_rel_namespace(sequenceOid); + char *sequenceSchemaName = get_namespace_name(sequenceSchemaOid); + char *sequenceName = get_rel_name(sequenceOid); + Oid sequenceTypeId = pg_get_sequencedef(sequenceOid)->seqtypid; + + AlterSequenceMinMax(sequenceOid, sequenceSchemaName, sequenceName, + sequenceTypeId); + } + + SetLocalEnableDDLPropagation(oldEnableDDLPropagation); +} + + +/* + * AdjustIdentityColumnSeqRangesOnLocalWorker takes a table oid, finds all identity + * columns on the table, and runs AlterSequenceMinMax() for each underlying sequence. + * + * The function assumes that it's run on a worker node. + */ +void +AdjustIdentityColumnSeqRangesOnLocalWorker(Oid relationId) +{ + Assert(!IsCoordinator()); + + /* + * While altering a sequence, avoid propagating the DDL to other nodes + * if it's already marked as distributed. 
+ */ + bool oldEnableDDLPropagation = EnableDDLPropagation; + SetLocalEnableDDLPropagation(false); + + Relation tableRelation = relation_open(relationId, AccessShareLock); + TupleDesc tableTupleDesc = RelationGetDescr(tableRelation); + + bool missingSequenceOk = false; + + for (int attributeIndex = 0; attributeIndex < tableTupleDesc->natts; + attributeIndex++) + { + Form_pg_attribute attributeForm = TupleDescAttr(tableTupleDesc, + attributeIndex); + + /* skip dropped columns */ + if (attributeForm->attisdropped) + { + continue; + } + + if (attributeForm->attidentity) + { + Oid sequenceOid = getIdentitySequence(identitySequenceRelation_compat( + tableRelation), + attributeForm->attnum, + missingSequenceOk); + + Oid sequenceSchemaOid = get_rel_namespace(sequenceOid); + char *sequenceSchemaName = get_namespace_name(sequenceSchemaOid); + char *sequenceName = get_rel_name(sequenceOid); + Oid sequenceTypeId = pg_get_sequencedef(sequenceOid)->seqtypid; + + AlterSequenceMinMax(sequenceOid, sequenceSchemaName, sequenceName, + sequenceTypeId); + } + } + + relation_close(tableRelation, NoLock); + + SetLocalEnableDDLPropagation(oldEnableDDLPropagation); +} + + +/* + * AdjustNextValColumnDefaultsOnLocalWorker takes a table oid, + * finds all int / smallint based columns with nextval() default + * expressions on the table, and runs an ALTER COLUMN statement for each + * column to change the default expression to use worker_nextval() instead + * of nextval(). + * + * The function assumes that it's run on a worker node. + */ +void +AdjustNextValColumnDefaultsOnLocalWorker(Oid relationId) +{ + Assert(!IsCoordinator()); + + /* + * While altering a sequence, avoid propagating the DDL to other nodes + * if it's already marked as distributed. + */ + bool oldEnableDDLPropagation = EnableDDLPropagation; + SetLocalEnableDDLPropagation(false); + + Relation tableRelation = relation_open(relationId, AccessShareLock); + TupleDesc tupleDescriptor = RelationGetDescr(tableRelation); + TupleDesc copiedTupleDescriptor = CreateTupleDescCopyConstr(tupleDescriptor); + relation_close(tableRelation, AccessShareLock); + + TupleConstr *tupleConstraints = copiedTupleDescriptor->constr; + AttrNumber defaultValueIndex = 0; + for (int attributeIndex = 0; attributeIndex < copiedTupleDescriptor->natts; + attributeIndex++) + { + Form_pg_attribute attributeForm = TupleDescAttr(copiedTupleDescriptor, + attributeIndex); + + if (attributeForm->attisdropped || !attributeForm->atthasdef) + { + continue; + } + + Assert(tupleConstraints != NULL); + + AttrDefault *defaultValueList = tupleConstraints->defval; + Assert(defaultValueList != NULL); + + AttrDefault *defaultValue = &(defaultValueList[defaultValueIndex]); + defaultValueIndex++; + + Assert(defaultValue->adnum == (attributeIndex + 1)); + Assert(defaultValueIndex <= tupleConstraints->num_defval); + + if (attributeForm->attgenerated) + { + continue; + } + + /* convert expression to node tree */ + Node *defaultNode = (Node *) stringToNode(defaultValue->adbin); + + if (!contain_nextval_expression_walker(defaultNode, NULL)) + { + continue; + } + + Oid seqOid = GetSequenceOid(relationId, defaultValue->adnum); + if (seqOid == InvalidOid || pg_get_sequencedef(seqOid)->seqtypid == INT8OID) + { + continue; + } + + /* + * We use worker_nextval for int and smallint types. + * Check issue #5126 and PR #5254 for details. 
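
For intuition, the before/after of the rewrite performed below presumably looks like this for an int column; the exact SQL emitted by GetAlterColumnWithNextvalDefaultCmd() is an assumption of this sketch:

```c
/*
 * Assumed before/after for an int column "id" backed by sequence "t_id_seq":
 *
 *   -- before: the regular default, which cannot safely serve a narrow
 *   -- column once each node hands out values from a wide 64-bit subrange
 *   ALTER TABLE t ALTER COLUMN id SET DEFAULT nextval('t_id_seq'::regclass);
 *
 *   -- after: worker-local default via worker_nextval(), which handles the
 *   -- int / smallint column types (see issue #5126 / PR #5254)
 *   ALTER TABLE t ALTER COLUMN id SET DEFAULT
 *       worker_nextval('t_id_seq'::regclass);
 */
```
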
+		 * https://github.com/citusdata/citus/issues/5126
+		 */
+		bool missingOk = false;
+		bool forceUseNextVal = false;
+		char *command =
+			GetAlterColumnWithNextvalDefaultCmd(seqOid, relationId,
+												NameStr(attributeForm->attname),
+												missingOk, forceUseNextVal);
+
+		ExecuteAndLogUtilityCommand(command);
+	}
+
+	SetLocalEnableDDLPropagation(oldEnableDDLPropagation);
+}
+
+
 /*
  * worker_apply_sequence_command takes a CREATE SEQUENCE command string, runs the
  * CREATE SEQUENCE command then creates and runs an ALTER SEQUENCE statement
@@ -204,6 +461,25 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS)
 	text *commandText = PG_GETARG_TEXT_P(0);
 	Oid sequenceTypeId = PG_GETARG_OID(1);
 
+
+	/*
+	 * Support the legacy version of this UDF, for backward compatibility
+	 * when an operation is initiated from a node that is not yet upgraded
+	 * and still assumes the presence of the older UDF version on remote
+	 * nodes, calling it on nodes that have already been upgraded to a newer
+	 * Citus version. Keeping this implementation allows upgraded nodes to
+	 * continue responding to such calls.
+	 */
+	bool newParamsProvided = false;
+	int64 lastValue = 0;
+	bool isCalled = false;
+	if (PG_NARGS() == 4)
+	{
+		newParamsProvided = true;
+		lastValue = PG_GETARG_INT64(2);
+		isCalled = PG_GETARG_BOOL(3);
+	}
+
 	const char *commandString = text_to_cstring(commandText);
 	Node *commandNode = ParseTreeNode(commandString);
@@ -237,7 +513,35 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS)
 
 	Assert(sequenceRelationId != InvalidOid);
 
-	AlterSequenceMinMax(sequenceRelationId, sequenceSchema, sequenceName, sequenceTypeId);
+	if (IsCoordinator())
+	{
+		/*
+		 * This should never happen, but check anyway.
+		 *
+		 * In older versions of Citus, this UDF was never called on the
+		 * coordinator node. So if it is executed against the coordinator,
+		 * the node initiating the operation must be assuming the new version
+		 * of this UDF, in which case last_value must always be provided.
+		 */
+		if (!newParamsProvided)
+		{
+			ereport(ERROR,
+					(errmsg("last_value and is_called must be provided when "
+							"adjusting sequence settings on the coordinator")));
+		}
+
+		DirectFunctionCall3(setval3_oid,
+							ObjectIdGetDatum(sequenceRelationId),
+							Int64GetDatum(lastValue),
+							BoolGetDatum(isCalled));
+	}
+	else
+	{
+		AlterSequenceMinMax(sequenceRelationId, sequenceSchema, sequenceName,
+							sequenceTypeId);
+	}
+
 	PG_RETURN_VOID();
 }
@@ -405,7 +709,7 @@ check_log_statement(List *statementList)
  * This is to ensure every group of workers passes out values from a unique range,
  * and therefore that all values generated for the sequence are globally unique.
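
The unique-range scheme referenced here can be pictured as partitioning the 64-bit value space by node group id; a sketch with an assumed 48-bit split, not a quote of AlterSequenceMinMax():

```c
/*
 * Assumed illustration of the per-node range computation; the 16-bit group
 * id / 48-bit value split is a sketch assumption for bigint sequences.
 */
int64 valueBits = 48;
int64 startValue = ((int64) groupId << valueBits) + 1;
int64 maxValue = startValue + (((int64) 1) << valueBits) - 1;

/* effectively: ALTER SEQUENCE ... MINVALUE <startValue>
 * MAXVALUE <maxValue> RESTART <startValue>, so each node group
 * draws its sequence values from a disjoint range */
```
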
*/ -static void +void AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName, Oid sequenceTypeId) { diff --git a/src/include/distributed/colocation_utils.h b/src/include/distributed/colocation_utils.h index b5ce0a28f37..58f21855e64 100644 --- a/src/include/distributed/colocation_utils.h +++ b/src/include/distributed/colocation_utils.h @@ -39,7 +39,6 @@ extern void InsertColocationGroupLocally(uint32 colocationId, int shardCount, Oid distributionColumnCollation); extern bool IsColocateWithNone(char *colocateWithTableName); extern bool IsColocateWithDefault(char *colocateWithTableName); -extern uint32 GetNextColocationId(void); extern void ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId); extern void CheckReplicationModel(Oid sourceRelationId, Oid targetRelationId); extern void CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId); diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 2d8ed3b2f09..c26b1ff61bb 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -188,10 +188,17 @@ extern List * PreprocessClusterStmt(Node *node, const char *clusterCommand, /* common.c - forward declarations*/ extern List * PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryString); -extern List * PreprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PostprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString); +extern List * PreprocessAlterDistributedObjectStmtFromCoordinator(Node *stmt, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessAlterDistributedObjectStmtFromAnyNode(Node *stmt, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PostprocessAlterDistributedObjectStmtFromCoordinator(Node *stmt, + const char *queryString + ); extern List * PreprocessDropDistributedObjectStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); @@ -666,7 +673,8 @@ extern Oid GetSequenceOid(Oid relationId, AttrNumber attnum); extern bool ConstrTypeUsesIndex(ConstrType constrType); extern bool ConstrTypeCitusCanDefaultName(ConstrType constrType); extern char * GetAlterColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId, - char *colname, bool missingTableOk); + char *colname, bool missingTableOk, + bool forceUseNextVal); extern void ErrorIfTableHasIdentityColumn(Oid relationId); extern void ConvertNewTableIfNecessary(Node *createStmt); diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 0f093e0870f..00698554b5f 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -106,6 +106,7 @@ extern List * DDLTaskList(Oid relationId, const char *commandString); extern List * NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands, bool warnForPartialFailure); extern List * NodeDDLTaskList(TargetWorkerSet targets, List *commands); +extern void SetLocalEnableDDLPropagation(bool state); extern bool AlterTableInProgress(void); extern bool DropSchemaOrDBInProgress(void); extern void UndistributeDisconnectedCitusLocalTables(void); diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index c742eb4e948..8032a0ea37b 100644 --- 
a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -84,6 +84,14 @@ enum MultiConnectionMode */ REQUIRE_CLEAN_CONNECTION = 1 << 3, + /* + * Note that a connection returned with this flag is not guaranteed to stay + * outside of transactions, e.g., if we later start a coordinated transaction + * and execute a command over this connection. For this reason, the callers + * should also mark the connection exclusive by using + * ClaimConnectionExclusively() to keep using the same connection outside of + * transactions for multiple commands. + */ OUTSIDE_TRANSACTION = 1 << 4, /* diff --git a/src/include/distributed/distributed_planner.h b/src/include/distributed/distributed_planner.h index b0c3347bec7..5d8c8907439 100644 --- a/src/include/distributed/distributed_planner.h +++ b/src/include/distributed/distributed_planner.h @@ -280,6 +280,7 @@ extern int32 BlessRecordExpression(Expr *expr); extern void DissuadePlannerFromUsingPlan(PlannedStmt *plan); extern PlannedStmt * FinalizePlan(PlannedStmt *localPlan, struct DistributedPlan *distributedPlan); +extern void DisableTrackingQueryCountersForPlannedStmt(PlannedStmt *plannedStmt); extern bool ContainsSingleShardTable(Query *query); extern RTEListProperties * GetRTEListPropertiesForQuery(Query *query); diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index 90a3662c661..f5c82983256 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -117,7 +117,7 @@ extern List * GenerateGrantOnFDWQueriesFromAclItem(Oid serverId, AclItem *aclIte extern char * PlacementUpsertCommand(uint64 shardId, uint64 placementId, uint64 shardLength, int32 groupId); extern TableDDLCommand * TruncateTriggerCreateCommand(Oid relationId); -extern void CreateInterTableRelationshipOfRelationOnWorkers(Oid relationId); +extern void CreateInterTableRelationshipOfRelationOnRemoteNodes(Oid relationId); extern List * InterTableRelationshipOfRelationCommandList(Oid relationId); extern List * DetachPartitionCommandList(void); extern void SyncNodeMetadataToNodes(void); @@ -204,7 +204,10 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context); #define ENABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'on'" #define DISABLE_METADATA_SYNC "SET citus.enable_metadata_sync TO 'off'" #define ENABLE_METADATA_SYNC "SET citus.enable_metadata_sync TO 'on'" -#define WORKER_APPLY_SEQUENCE_COMMAND "SELECT worker_apply_sequence_command (%s,%s)" +#define WORKER_APPLY_SEQUENCE_COMMAND \ + "SELECT worker_apply_sequence_command (%s,%s," INT64_FORMAT ", %s)" +#define WORKER_APPLY_SEQUENCE_COMMAND_LEGACY \ + "SELECT worker_apply_sequence_command (%s,%s)" #define UPSERT_PLACEMENT \ "INSERT INTO pg_dist_placement " \ "(shardid, shardstate, shardlength, " \ @@ -218,8 +221,11 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context); "placementid = EXCLUDED.placementid" #define METADATA_SYNC_CHANNEL "metadata_sync" -#define WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES \ +#define WORKER_ADJUST_IDENTITY_COLUMN_SEQ_RANGES_LEGACY \ "SELECT pg_catalog.worker_adjust_identity_column_seq_ranges(%s)" +#define WORKER_ADJUST_IDENTITY_COLUMN_SEQ_SETTINGS \ + "SELECT citus_internal.adjust_identity_column_seq_settings(%s, " INT64_FORMAT \ + ", %s)" /* controlled via GUC */ extern char *EnableManualMetadataChangesForUser; diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index 
8abbca18a45..4b7cc015d74 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -325,7 +325,7 @@ extern ShardInterval * CopyShardInterval(ShardInterval *srcInterval); extern uint64 ShardLength(uint64 shardId); extern bool NodeGroupHasShardPlacements(int32 groupId); extern bool IsActiveShardPlacement(ShardPlacement *ShardPlacement); -extern bool IsRemoteShardPlacement(ShardPlacement *shardPlacement); +extern bool IsNonCoordShardPlacement(ShardPlacement *shardPlacement); extern bool IsPlacementOnWorkerNode(ShardPlacement *placement, WorkerNode *workerNode); extern List * FilterShardPlacementList(List *shardPlacementList, bool (*filter)( ShardPlacement *)); @@ -376,13 +376,14 @@ extern void DeleteShardRow(uint64 shardId); extern void UpdatePlacementGroupId(uint64 placementId, int groupId); extern void DeleteShardPlacementRowGlobally(uint64 placementId); extern void DeleteShardPlacementRow(uint64 placementId); -extern void CreateSingleShardTable(Oid relationId, ColocationParam colocationParam); +extern void CreateSingleShardTable(Oid relationId, ColocationParam colocationParam, + bool allowFromWorkersIfPostgresTable); extern void CreateDistributedTable(Oid relationId, char *distributionColumnName, char distributionMethod, int shardCount, bool shardCountIsStrict, char *colocateWithTableName); extern void CreateReferenceTable(Oid relationId); extern void CreateTruncateTrigger(Oid relationId); -extern uint64 CopyFromLocalTableIntoDistTable(Oid localTableId, Oid distributedTableId); +extern List * CopyablePlainColumnNameListFromTupleDesc(TupleDesc tupleDescriptor); extern void EnsureUndistributeTenantTableSafe(Oid relationId, const char *operationName); extern TableConversionReturn * UndistributeTable(TableConversionParameters *params); extern void UndistributeTables(List *relationIdList); diff --git a/src/include/distributed/multi_physical_planner.h b/src/include/distributed/multi_physical_planner.h index 25ca24ec7a8..3f13b0df97a 100644 --- a/src/include/distributed/multi_physical_planner.h +++ b/src/include/distributed/multi_physical_planner.h @@ -487,6 +487,11 @@ typedef struct DistributedPlan * of source rows to be repartitioned for colocation with the target. */ int sourceResultRepartitionColumnIndex; + + /* + * Disables tracking query stat counters if true. 
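
One assumed way a planner-side caller would set this flag, using DisableTrackingQueryCountersForPlannedStmt() declared in distributed_planner.h above; the pairing of the two calls is an assumption of this sketch:

```c
/*
 * Assumed call site: opt an internally generated plan out of the query stat
 * counters. FinalizePlan() and the helper are declared earlier in this diff.
 */
PlannedStmt *stmt = FinalizePlan(localPlan, distributedPlan);
DisableTrackingQueryCountersForPlannedStmt(stmt);
```
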
+ */ + bool disableTrackingQueryCounters; } DistributedPlan; diff --git a/src/include/distributed/resource_lock.h b/src/include/distributed/resource_lock.h index b6f21e5ab37..80602f7f95c 100644 --- a/src/include/distributed/resource_lock.h +++ b/src/include/distributed/resource_lock.h @@ -248,12 +248,14 @@ extern void LockShardResource(uint64 shardId, LOCKMODE lockmode); /* Lock a co-location group */ extern void LockColocationId(int colocationId, LOCKMODE lockMode); extern void UnlockColocationId(int colocationId, LOCKMODE lockMode); +extern char * LockColocationIdCommand(int colocationId, LOCKMODE lockMode); /* Lock multiple shards for safe modification */ extern void LockShardListMetadata(List *shardIntervalList, LOCKMODE lockMode); extern void LockShardListMetadataOnWorkers(LOCKMODE lockmode, List *shardIntervalList); extern void LockShardsInPlacementListMetadata(List *shardPlacementList, LOCKMODE lockMode); +extern LOCKMODE IntToLockMode(int mode); extern void LockTransactionRecovery(LOCKMODE lockMode); diff --git a/src/include/distributed/worker_manager.h b/src/include/distributed/worker_manager.h index 189f63cb51a..455b0e83261 100644 --- a/src/include/distributed/worker_manager.h +++ b/src/include/distributed/worker_manager.h @@ -72,6 +72,7 @@ extern WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList, extern uint32 ActivePrimaryNonCoordinatorNodeCount(void); extern uint32 ActiveReadableNodeCount(void); extern List * ActivePrimaryNonCoordinatorNodeList(LOCKMODE lockMode); +extern List * ActivePrimaryRemoteNonCoordinatorNodeList(LOCKMODE lockMode); extern List * ActivePrimaryNodeList(LOCKMODE lockMode); extern List * ActivePrimaryRemoteNodeList(LOCKMODE lockMode); extern bool CoordinatorAddedAsWorkerNode(void); @@ -91,8 +92,10 @@ extern WorkerNode * ModifiableWorkerNode(const char *nodeName, int32 nodePort); extern List * ReadDistNode(bool includeNodesFromOtherClusters); extern void EnsureCoordinator(void); extern void EnsurePropagationToCoordinator(void); +extern void EnsureCoordinatorUnlessTenantSchema(Oid relationId); extern void EnsureCoordinatorIsInMetadata(void); extern void InsertCoordinatorIfClusterEmpty(void); +extern void LockPgDistNodeOnCoordinatorViaSuperUser(LOCKMODE lockMode); extern uint32 GroupForNode(char *nodeName, int32 nodePort); extern WorkerNode * PrimaryNodeForGroup(int32 groupId, bool *groupContainsNodes); extern bool NodeIsPrimaryAndRemote(WorkerNode *worker); diff --git a/src/include/distributed/worker_protocol.h b/src/include/distributed/worker_protocol.h index 21c0c44c87e..e8a7b758152 100644 --- a/src/include/distributed/worker_protocol.h +++ b/src/include/distributed/worker_protocol.h @@ -38,6 +38,11 @@ /* Function declarations local to the worker module */ +extern void AdjustDependentSeqRangesOnLocalWorker(Oid relationId); +extern void AdjustIdentityColumnSeqRangesOnLocalWorker(Oid relationId); +extern void AdjustNextValColumnDefaultsOnLocalWorker(Oid relationId); +extern void AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName, + Oid sequenceTypeId); extern uint64 ExtractShardIdFromTableName(const char *tableName, bool missingOk); extern void SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg); diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h index 272b7592890..866e3c25a12 100644 --- a/src/include/distributed/worker_transaction.h +++ b/src/include/distributed/worker_transaction.h @@ -36,6 +36,18 @@ typedef enum TargetWorkerSet */ 
REMOTE_METADATA_NODES, + + /* + * All the active primary nodes in the metadata which have metadata + * except the local node and the coordinator + */ + REMOTE_NON_COORDINATOR_METADATA_NODES, + + /* + * Only the coordinator node + */ + ONLY_COORDINATOR_NODE, + /* * All the active primary nodes in the metadata except the coordinator */ @@ -72,6 +84,16 @@ extern void SendCommandToRemoteMetadataNodesParams(const char *command, const char *user, int parameterCount, const Oid *parameterTypes, const char *const *parameterValues); +extern void SendCommandToRemoteWorkersWithMetadataParams(const char *command, + const char *user, int + parameterCount, + const Oid *parameterTypes, + const char *const * + parameterValues); +extern void SendCommandToCoordinatorParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues); extern bool SendOptionalCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort, const char *nodeUser, @@ -93,6 +115,9 @@ extern void SendCommandToWorkersWithMetadata(const char *command); extern void SendCommandToWorkersWithMetadataViaSuperUser(const char *command); extern void SendCommandListToWorkersWithMetadata(List *commands); extern void SendCommandToRemoteNodesWithMetadata(const char *command); +extern void SendCommandToRemoteWorkersWithMetadata(const char *command); +extern void SendCommandToCoordinatorViaSuperUser(const char *command); +extern void SendCommandToCoordinator(const char *command); extern void SendCommandToRemoteNodesWithMetadataViaSuperUser(const char *command); extern void SendCommandListToRemoteNodesWithMetadata(List *commands); extern void SendBareCommandListToRemoteMetadataNodes(List *commandList); diff --git a/src/test/regress/Pipfile b/src/test/regress/Pipfile index 8ade5491338..fbb172573da 100644 --- a/src/test/regress/Pipfile +++ b/src/test/regress/Pipfile @@ -4,7 +4,7 @@ url = "https://pypi.python.org/simple" verify_ssl = true [packages] -mitmproxy = {git = "https://github.com/citusdata/mitmproxy.git", ref = "main"} +mitmproxy = {git = "https://github.com/citusdata/mitmproxy.git", ref = "70bad9a3c098f605e5f8b25553e5db5334018ff1"} "aioquic" = ">=1.2.0,<1.3.0" "mitmproxy-rs" = ">=0.12.6,<0.13.0" argon2-cffi = ">=23.1.0" @@ -12,12 +12,12 @@ bcrypt = ">=4.1.2" brotli = "<=1.2.0" h11 = "==0.16.0" h2 = "==4.3.0" -tornado = ">=6.5.1,<6.6.0" +tornado = ">=6.5.5,<6.6.0" zstandard = ">=0.25.0" construct = "*" docopt = "==0.6.2" -cryptography = "==44.0.3" -pytest = "*" +cryptography = "==46.0.7" +pytest = "==9.0.3" psycopg = "*" filelock = "*" pytest-asyncio = "*" @@ -25,12 +25,12 @@ pytest-timeout = "*" pytest-xdist = "*" pytest-repeat = "*" pyyaml = "*" -werkzeug = "==3.1.4" +werkzeug = "==3.1.6" "typing-extensions" = ">=4.13.2,<5" pyperclip = "==1.9.0" [dev-packages] -black = "==24.10.0" +black = "==26.3.1" isort = "*" flake8 = "*" flake8-bugbear = "*" diff --git a/src/test/regress/Pipfile.lock b/src/test/regress/Pipfile.lock index 5b662d4343f..ca68abfdbd9 100644 --- a/src/test/regress/Pipfile.lock +++ b/src/test/regress/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "5f734fff88a49010712613a14addae5f38347c691d0b3004b09f59ba5c7cc061" + "sha256": "912f8e179377d7d34b800be62a5b10c0fa6ecdebc2413d7002f8340679b4c6be" }, "pipfile-spec": 6, "requires": { @@ -92,19 +92,19 @@ }, "asgiref": { "hashes": [ - "sha256:aef8a81283a34d0ab31630c9b7dfe70c812c95eba78171367ca8745e88124734", - "sha256:d89f2d8cd8b56dada7d52fa7dc8075baa08fb836560710d38c292a7a3f78c04e" + 
"sha256:13acff32519542a1736223fb79a715acdebe24286d98e8b164a73085f40da2c4", + "sha256:1db9021efadb0d9512ce8ffaf72fcef601c7b73a8807a1bb2ef143dc6b14846d" ], "markers": "python_version >= '3.9'", - "version": "==3.10.0" + "version": "==3.11.0" }, "attrs": { "hashes": [ - "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", - "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373" + "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309", + "sha256:d03ceb89cb322a8fd706d4fb91940737b6642aa36998fe130a9bc96c985eff32" ], "markers": "python_version >= '3.9'", - "version": "==25.4.0" + "version": "==26.1.0" }, "bcrypt": { "hashes": [ @@ -292,11 +292,11 @@ }, "certifi": { "hashes": [ - "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", - "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120" + "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa", + "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7" ], "markers": "python_version >= '3.7'", - "version": "==2026.1.4" + "version": "==2026.2.25" }, "cffi": { "hashes": [ @@ -390,11 +390,11 @@ }, "click": { "hashes": [ - "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", - "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6" + "sha256:14162b8b3b3550a7d479eafa77dfd3c38d9dc8951f6f69c78913a8f9a7540fd5", + "sha256:1924d2c27c5653561cd2cae4548d1406039cb79b858b747cfea24924bbc1616d" ], "markers": "python_version >= '3.10'", - "version": "==8.3.1" + "version": "==8.3.2" }, "construct": { "hashes": [ @@ -407,47 +407,59 @@ }, "cryptography": { "hashes": [ - "sha256:02f55fb4f8b79c1221b0961488eaae21015b69b210e18c386b69de182ebb1259", - "sha256:157f1f3b8d941c2bd8f3ffee0af9b049c9665c39d3da9db2dc338feca5e98a43", - "sha256:192ed30fac1728f7587c6f4613c29c584abdc565d7417c13904708db10206645", - "sha256:21a83f6f35b9cc656d71b5de8d519f566df01e660ac2578805ab245ffd8523f8", - "sha256:25cd194c39fa5a0aa4169125ee27d1172097857b27109a45fadc59653ec06f44", - "sha256:3883076d5c4cc56dbef0b898a74eb6992fdac29a7b9013870b34efe4ddb39a0d", - "sha256:3bb0847e6363c037df8f6ede57d88eaf3410ca2267fb12275370a76f85786a6f", - "sha256:3be3f649d91cb182c3a6bd336de8b61a0a71965bd13d1a04a0e15b39c3d5809d", - "sha256:3f07943aa4d7dad689e3bb1638ddc4944cc5e0921e3c227486daae0e31a05e54", - "sha256:479d92908277bed6e1a1c69b277734a7771c2b78633c224445b5c60a9f4bc1d9", - "sha256:4ffc61e8f3bf5b60346d89cd3d37231019c17a081208dfbbd6e1605ba03fa137", - "sha256:5639c2b16764c6f76eedf722dbad9a0914960d3489c0cc38694ddf9464f1bb2f", - "sha256:58968d331425a6f9eedcee087f77fd3c927c88f55368f43ff7e0a19891f2642c", - "sha256:5d186f32e52e66994dce4f766884bcb9c68b8da62d61d9d215bfe5fb56d21334", - "sha256:5d20cc348cca3a8aa7312f42ab953a56e15323800ca3ab0706b8cd452a3a056c", - "sha256:6866df152b581f9429020320e5eb9794c8780e90f7ccb021940d7f50ee00ae0b", - "sha256:7d5fe7195c27c32a64955740b949070f21cba664604291c298518d2e255931d2", - "sha256:896530bc9107b226f265effa7ef3f21270f18a2026bc09fed1ebd7b66ddf6375", - "sha256:962bc30480a08d133e631e8dfd4783ab71cc9e33d5d7c1e192f0b7c06397bb88", - "sha256:978631ec51a6bbc0b7e58f23b68a8ce9e5f09721940933e9c217068388789fe5", - "sha256:9b4d4a5dbee05a2c390bf212e78b99434efec37b17a4bff42f50285c5c8c9647", - "sha256:ab0b005721cc0039e885ac3503825661bd9810b15d4f374e473f8c89b7d5460c", - "sha256:af653022a0c25ef2e3ffb2c673a50e5a0d02fecc41608f4954176f1933b12359", - "sha256:b0cc66c74c797e1db750aaa842ad5b8b78e14805a9b5d1348dc603612d3e3ff5", - 
"sha256:b424563394c369a804ecbee9b06dfb34997f19d00b3518e39f83a5642618397d", - "sha256:c138abae3a12a94c75c10499f1cbae81294a6f983b3af066390adee73f433028", - "sha256:c6cd67722619e4d55fdb42ead64ed8843d64638e9c07f4011163e46bc512cf01", - "sha256:c91fc8e8fd78af553f98bc7f2a1d8db977334e4eea302a4bfd75b9461c2d8904", - "sha256:cad399780053fb383dc067475135e41c9fe7d901a97dd5d9c5dfb5611afc0d7d", - "sha256:cb90f60e03d563ca2445099edf605c16ed1d5b15182d21831f58460c48bffb93", - "sha256:dad80b45c22e05b259e33ddd458e9e2ba099c86ccf4e88db7bbab4b747b18d06", - "sha256:dd3db61b8fe5be220eee484a17233287d0be6932d056cf5738225b9c05ef4fff", - "sha256:e28d62e59a4dbd1d22e747f57d4f00c459af22181f0b2f787ea83f5a876d7c76", - "sha256:e909df4053064a97f1e6565153ff8bb389af12c5c8d29c343308760890560aff", - "sha256:f3ffef566ac88f75967d7abd852ed5f182da252d23fac11b4766da3957766759", - "sha256:fc3c9babc1e1faefd62704bb46a69f359a9819eb0292e40df3fb6e3574715cd4", - "sha256:fe19d8bc5536a91a24a8133328880a41831b6c5df54599a8417b62fe015d3053" + "sha256:04959522f938493042d595a736e7dbdff6eb6cc2339c11465b3ff89343b65f65", + "sha256:128c5edfe5e5938b86b03941e94fac9ee793a94452ad1365c9fc3f4f62216832", + "sha256:1d25aee46d0c6f1a501adcddb2d2fee4b979381346a78558ed13e50aa8a59067", + "sha256:24402210aa54baae71d99441d15bb5a1919c195398a87b563df84468160a65de", + "sha256:258514877e15963bd43b558917bc9f54cf7cf866c38aa576ebf47a77ddbc43a4", + "sha256:35719dc79d4730d30f1c2b6474bd6acda36ae2dfae1e3c16f2051f215df33ce0", + "sha256:397655da831414d165029da9bc483bed2fe0e75dde6a1523ec2fe63f3c46046b", + "sha256:3986ac1dee6def53797289999eabe84798ad7817f3e97779b5061a95b0ee4968", + "sha256:420b1e4109cc95f0e5700eed79908cef9268265c773d3a66f7af1eef53d409ef", + "sha256:42a1e5f98abb6391717978baf9f90dc28a743b7d9be7f0751a6f56a75d14065b", + "sha256:462ad5cb1c148a22b2e3bcc5ad52504dff325d17daf5df8d88c17dda1f75f2a4", + "sha256:506c4ff91eff4f82bdac7633318a526b1d1309fc07ca76a3ad182cb5b686d6d3", + "sha256:5ad9ef796328c5e3c4ceed237a183f5d41d21150f972455a9d926593a1dcb308", + "sha256:5d1c02a14ceb9148cc7816249f64f623fbfee39e8c03b3650d842ad3f34d637e", + "sha256:5e51be372b26ef4ba3de3c167cd3d1022934bc838ae9eaad7e644986d2a3d163", + "sha256:60627cf07e0d9274338521205899337c5d18249db56865f943cbe753aa96f40f", + "sha256:65814c60f8cc400c63131584e3e1fad01235edba2614b61fbfbfa954082db0ee", + "sha256:73510b83623e080a2c35c62c15298096e2a5dc8d51c3b4e1740211839d0dea77", + "sha256:7bbc6ccf49d05ac8f7d7b5e2e2c33830d4fe2061def88210a126d130d7f71a85", + "sha256:80406c3065e2c55d7f49a9550fe0c49b3f12e5bfff5dedb727e319e1afb9bf99", + "sha256:84d4cced91f0f159a7ddacad249cc077e63195c36aac40b4150e7a57e84fffe7", + "sha256:8a469028a86f12eb7d2fe97162d0634026d92a21f3ae0ac87ed1c4a447886c83", + "sha256:91bbcb08347344f810cbe49065914fe048949648f6bd5c2519f34619142bbe85", + "sha256:935ce7e3cfdb53e3536119a542b839bb94ec1ad081013e9ab9b7cfd478b05006", + "sha256:9694078c5d44c157ef3162e3bf3946510b857df5a3955458381d1c7cfc143ddb", + "sha256:a1529d614f44b863a7b480c6d000fe93b59acee9c82ffa027cfadc77521a9f5e", + "sha256:abad9dac36cbf55de6eb49badd4016806b3165d396f64925bf2999bcb67837ba", + "sha256:b36a4695e29fe69215d75960b22577197aca3f7a25b9cf9d165dcfe9d80bc325", + "sha256:b7b412817be92117ec5ed95f880defe9cf18a832e8cafacf0a22337dc1981b4d", + "sha256:c5b1ccd1239f48b7151a65bc6dd54bcfcc15e028c8ac126d3fada09db0e07ef1", + "sha256:cbd5fb06b62bd0721e1170273d3f4d5a277044c47ca27ee257025146c34cbdd1", + "sha256:cdf1a610ef82abb396451862739e3fc93b071c844399e15b90726ef7470eeaf2", + "sha256:cdfbe22376065ffcf8be74dc9a909f032df19bc58a699456a21712d6e5eabfd0", + 
"sha256:d02c738dacda7dc2a74d1b2b3177042009d5cab7c7079db74afc19e56ca1b455", + "sha256:d151173275e1728cf7839aaa80c34fe550c04ddb27b34f48c232193df8db5842", + "sha256:d23c8ca48e44ee015cd0a54aeccdf9f09004eba9fc96f38c911011d9ff1bd457", + "sha256:d3b99c535a9de0adced13d159c5a9cf65c325601aa30f4be08afd680643e9c15", + "sha256:d5f7520159cd9c2154eb61eb67548ca05c5774d39e9c2c4339fd793fe7d097b2", + "sha256:db0f493b9181c7820c8134437eb8b0b4792085d37dbb24da050476ccb664e59c", + "sha256:e06acf3c99be55aa3b516397fe42f5855597f430add9c17fa46bf2e0fb34c9bb", + "sha256:e4cfd68c5f3e0bfdad0d38e023239b96a2fe84146481852dffbcca442c245aa5", + "sha256:ea42cbe97209df307fdc3b155f1b6fa2577c0defa8f1f7d3be7d31d189108ad4", + "sha256:ebd6daf519b9f189f85c479427bbd6e9c9037862cf8fe89ee35503bd209ed902", + "sha256:f247c8c1a1fb45e12586afbb436ef21ff1e80670b2861a90353d9b025583d246", + "sha256:fbfd0e5f273877695cb93baf14b185f4878128b250cc9f8e617ea0c025dfb022", + "sha256:fc9ab8856ae6cf7c9358430e49b368f3108f050031442eaeb6b9d87e4dcf4e4f", + "sha256:fcd8eac50d9138c1d7fc53a653ba60a2bee81a505f9f8850b6b2888555a45d0e", + "sha256:fdd1736fed309b4300346f88f74cd120c27c56852c3838cab416e7a166f67298", + "sha256:ffca7aa1d00cf7d6469b988c581598f2259e46215e0140af408966a24cf086ce" ], "index": "pypi", - "markers": "python_version >= '3.7' and python_full_version not in '3.9.0, 3.9.1'", - "version": "==44.0.3" + "markers": "python_version >= '3.8' and python_full_version not in '3.9.0, 3.9.1'", + "version": "==46.0.7" }, "docopt": { "hashes": [ @@ -466,20 +478,20 @@ }, "filelock": { "hashes": [ - "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64", - "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8" + "sha256:4ed1010aae813c4ee8d9c660e4792475ee60c4a0ba76073ceaf862bd317e3ca6", + "sha256:de9af6712788e7171df1b28b15eba2446c69721433fa427a9bee07b17820a9db" ], "index": "pypi", "markers": "python_version >= '3.10'", - "version": "==3.20.2" + "version": "==3.28.0" }, "flask": { "hashes": [ - "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87", - "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c" + "sha256:0ef0e52b8a9cd932855379197dd8f94047b359ca0a78695144304cb45f87c9eb", + "sha256:f4bcbefc124291925f1a26446da31a5178f9483862233b23c0c96a20701f670c" ], "markers": "python_version >= '3.9'", - "version": "==3.1.2" + "version": "==3.1.3" }, "h11": { "hashes": [ @@ -655,33 +667,33 @@ "mitmproxy": { "git": "https://github.com/citusdata/mitmproxy.git", "hashes": [ - "sha256:95db7b57b21320a0c76e59e1d6644daaa431291cdf89419608301424651199b4" + "sha256:7b111ba3b83b34b0d9b653044685db7c3f5fbc63b39b8f06439642da83910713" ], "index": "pypi", "markers": "python_version >= '3.12'", - "ref": "30b588eb1b7c66da233d7c471ec23f2b632ed531", - "version": "==11.0.2" + "ref": "70bad9a3c098f605e5f8b25553e5db5334018ff1", + "version": "==12.2.2" }, "mitmproxy-linux": { "hashes": [ - "sha256:0bea9353c71ebfd2174f6730b3fd0fdff3adea1aa15450035bed3b83e36ef455", - "sha256:2238455e65970382825baed2e998601ea82d8dcaae51bd8ee0859d596524a822", - "sha256:fbcb25316e95d0b2b5ced4e0cc3d90fdb1b7169300a005cc79339894d665363a" + "sha256:94b10fee02aa42287739623cef921e1a53955005d45c9e2fa309ae9f0bf8d37d", + "sha256:b4413e27c692f30036ad6d73432826e728ede026fac8e51651d0c545dd0177f2", + "sha256:ee842865a05f69196004ddcb29d50af0602361d9d6acee04f370f7e01c3674e8" ], "markers": "python_version >= '3.12'", - "version": "==0.12.8" + "version": "==0.12.9" }, "mitmproxy-rs": { "hashes": [ - 
"sha256:14ea236d0950ab35d667b78b5fe15d43e7345e166e22144624a1283edc78443e", - "sha256:16afd0fc1a00d586ffe2027d217908c3e0389d7d0897eccda6e59fda991e89ba", - "sha256:739591f696cf29913302a72fa9644cf97228774604304a2ea3987fe5588d231c", - "sha256:b0ead519f5a4ab019e7912544c0642f28f8336036ef1480e42a772a8cc947550", - "sha256:c5b0799808a4de0ee60e8f350043820ad56eea738ce3ce25d5c6faaa245b6c9a" + "sha256:1fb9fb4aac9ecb82e2c3c5c439ef5e4961be7934d80ade5e9a99c0a944b8ea2f", + "sha256:1fd716e87da8be3c62daa4325a5ff42bedd951fb8614c5f66caa94b7c21e2593", + "sha256:245922663440330c4b5a36d0194ed559b1dbd5e38545db2eb947180ed12a5e92", + "sha256:afeb3a2da2bc26474e1a2febaea4432430c5fde890dfce33bc4c1e65e6baef1b", + "sha256:c6ffc35c002c675cac534442d92d1cdebd66fafd63754ad33b92ae968ea6e449" ], "index": "pypi", "markers": "python_version >= '3.12'", - "version": "==0.12.8" + "version": "==0.12.9" }, "msgpack": { "hashes": [ @@ -753,11 +765,11 @@ }, "packaging": { "hashes": [ - "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", - "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f" + "sha256:5d9c0669c6285e491e0ced2eee587eaf67b670d94a19e94e3984a481aba6802f", + "sha256:f042152b681c4bfac5cae2742a55e103d27ab2ec0f3d88037136b6bfe7c9c5de" ], "markers": "python_version >= '3.8'", - "version": "==25.0" + "version": "==26.1" }, "pluggy": { "hashes": [ @@ -769,12 +781,12 @@ }, "psycopg": { "hashes": [ - "sha256:3e94bc5f4690247d734599af56e51bae8e0db8e4311ea413f801fef82b14a99b", - "sha256:707a67975ee214d200511177a6a80e56e654754c9afca06a7194ea6bbfde9ca7" + "sha256:5e9a47458b3c1583326513b2556a2a9473a1001a56c9efe9e587245b43148dd9", + "sha256:f96525a72bcfade6584ab17e89de415ff360748c766f0106959144dcbb38c698" ], "index": "pypi", "markers": "python_version >= '3.10'", - "version": "==3.3.2" + "version": "==3.3.3" }, "publicsuffix2": { "hashes": [ @@ -785,11 +797,11 @@ }, "pyasn1": { "hashes": [ - "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", - "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034" + "sha256:697a8ecd6d98891189184ca1fa05d1bb00e2f84b5977c481452050549c8a72cf", + "sha256:a80184d120f0864a52a073acc6fc642847d0be408e7c7252f31390c0f4eadcde" ], "markers": "python_version >= '3.8'", - "version": "==0.6.1" + "version": "==0.6.3" }, "pyasn1-modules": { "hashes": [ @@ -801,53 +813,53 @@ }, "pycparser": { "hashes": [ - "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", - "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934" + "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29", + "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992" ], - "markers": "python_version >= '3.8'", - "version": "==2.23" + "markers": "python_version >= '3.10'", + "version": "==3.0" }, "pygments": { "hashes": [ - "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", - "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b" + "sha256:6757cd03768053ff99f3039c1a36d6c0aa0b263438fcab17520b30a303a82b5f", + "sha256:81a9e26dd42fd28a23a2d169d86d7ac03b46e2f8b59ed4698fb4785f946d0176" ], - "markers": "python_version >= '3.8'", - "version": "==2.19.2" + "markers": "python_version >= '3.9'", + "version": "==2.20.0" }, "pylsqpack": { "hashes": [ - "sha256:2d91d87672beb0beff6a866dbf35e8b45791d8dffcd5cfd9d8cc397001101fd5", - "sha256:2f9a2ef59588d32cd02847c6b9d7140440f67a0751da99f96a2ff4edadc85eae", - "sha256:3f7d78352e764732ac1a9ab109aa84e003996a7d64de7098cb20bdc007cf7613", 
- "sha256:498b374b16b51532997998c4cf4021161d2a611f5ea6b02ad95ca99815c54abf", - "sha256:4e5b0b5ec92be6e5e6eb1c52d45271c5c7f8f2a2cd8c672ab240ac2cd893cd26", - "sha256:829a2466b80af9766cf0ad795b866796a4000cec441a0eb222357efd01ec6d42", - "sha256:8a9e25c5a98a0959c6511aaf7d1a6ac0d6146be349a8c3c09fec2e5250cb2901", - "sha256:8ba86c384dcf8952cef190f8cc4d61cb2a8e4eeaf25093c6aa38b9b696ac82dc", - "sha256:978497811bb58cf7ae11c0e1d4cf9bdf6bccef77556d039ae1836b458cb235fc", - "sha256:b516d56078a16592596ea450ea20e9a54650af759754e2e807b7046be13c83ee", - "sha256:db03232c85855cb03226447e41539f8631d7d4e5483d48206e30d470a9cb07a1", - "sha256:f55b126940d8b3157331f123d4428d703a698a6db65a6a7891f7ec1b90c86c56" + "sha256:23b4d8af48836893beac356c10ca268161953de5bf9ed691526a93f5c82433e9", + "sha256:54978a9879471596d84bbad5e67d727014048926bc5bb2dac0eb3701b48c5ac9", + "sha256:6024854eb16d32803d4890fb90a73b9348c74b61c0770680aefaaa75f8456e8c", + "sha256:8da12be7b35b7c9a8cf73a4c077f72e5022a311f80a401c79904213376f2d767", + "sha256:8ec455f44614228f89e38d40c1b1e37895620e20ec6b21e3b562fa8b79a23890", + "sha256:8edf48d0a023cd3629b2c4aaccac9b79a46d566c0f61e7416b5678228433763d", + "sha256:b6a8bb42127d5ece8d301a673c8205df25b73b69f8c46b9f0c3034588de1789a", + "sha256:c3e2327af25ee616ce4483a8748f0957cf017cbca82d58ed15efea68f70f94ff", + "sha256:caf63ddc2e581c764d17432893acce02c5c29ff879d77c2abf1e26aa4eeb831b", + "sha256:e3dc5f146fd456b50b227858aed59faa0ff8445aa426e69bb4e50d46c487aab0", + "sha256:e3f977d419c60c1d6c2240e6d7a52df820d37eb8c36b4057113bcd7859f53e2c", + "sha256:e7d956dbc8f7d597b237b9157d0a16bc7c655a1b031239763c18dc8582aff8cc" ], "markers": "python_version >= '3.10'", - "version": "==0.3.23" + "version": "==0.3.24" }, "pyopenssl": { "hashes": [ - "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab", - "sha256:8d031884482e0c67ee92bf9a4d8cceb08d92aba7136432ffb0703c5280fc205b" + "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6", + "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329" ], "markers": "python_version >= '3.7'", - "version": "==25.1.0" + "version": "==25.3.0" }, "pyparsing": { "hashes": [ - "sha256:2df8d5b7b2802ef88e8d016a2eb9c7aeaa923529cd251ed0fe4608275d4105b6", - "sha256:e38a4f02064cf41fe6593d328d0512495ad1f3d8a91c4f73fc401b3079a59a5e" + "sha256:850ba148bd908d7e2411587e247a1e4f0327839c40e2e5e6d05a007ecc69911d", + "sha256:c777f4d763f140633dcb6d8a3eda953bf7a214dc4eff598413c070bcdc117cbc" ], "markers": "python_version >= '3.9'", - "version": "==3.2.5" + "version": "==3.3.2" }, "pyperclip": { "hashes": [ @@ -858,12 +870,12 @@ }, "pytest": { "hashes": [ - "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", - "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11" + "sha256:2c5efc453d45394fdd706ade797c0a81091eccd1d6e4bccfcd476e2b8e0ab5d9", + "sha256:b86ada508af81d19edeb213c681b1d48246c1a91d304c6c81a427674c17eb91c" ], "index": "pypi", "markers": "python_version >= '3.10'", - "version": "==9.0.2" + "version": "==9.0.3" }, "pytest-asyncio": { "hashes": [ @@ -983,78 +995,11 @@ }, "ruamel.yaml": { "hashes": [ - "sha256:048f26d64245bae57a4f9ef6feb5b552a386830ef7a826f235ffb804c59efbba", - "sha256:a6e587512f3c998b2225d68aa1f35111c29fad14aed561a26e73fab729ec5e5a" - ], - "markers": "python_version >= '3.8'", - "version": "==0.18.16" - }, - "ruamel.yaml.clib": { - "hashes": [ - "sha256:014181cdec565c8745b7cbc4de3bf2cc8ced05183d986e6d1200168e5bb59490", - "sha256:04d21dc9c57d9608225da28285900762befbb0165ae48482c15d8d4989d4af14", - 
"sha256:05c70f7f86be6f7bee53794d80050a28ae7e13e4a0087c1839dcdefd68eb36b6", - "sha256:0ba6604bbc3dfcef844631932d06a1a4dcac3fee904efccf582261948431628a", - "sha256:11e5499db1ccbc7f4b41f0565e4f799d863ea720e01d3e99fa0b7b5fcd7802c9", - "sha256:1b45498cc81a4724a2d42273d6cfc243c0547ad7c6b87b4f774cb7bcc131c98d", - "sha256:1bb7b728fd9f405aa00b4a0b17ba3f3b810d0ccc5f77f7373162e9b5f0ff75d5", - "sha256:1f66f600833af58bea694d5892453f2270695b92200280ee8c625ec5a477eed3", - "sha256:27dc656e84396e6d687f97c6e65fb284d100483628f02d95464fd731743a4afe", - "sha256:2812ff359ec1f30129b62372e5f22a52936fac13d5d21e70373dbca5d64bb97c", - "sha256:2b216904750889133d9222b7b873c199d48ecbb12912aca78970f84a5aa1a4bc", - "sha256:331fb180858dd8534f0e61aa243b944f25e73a4dae9962bd44c46d1761126bbf", - "sha256:3cb75a3c14f1d6c3c2a94631e362802f70e83e20d1f2b2ef3026c05b415c4900", - "sha256:3eb199178b08956e5be6288ee0b05b2fb0b5c1f309725ad25d9c6ea7e27f962a", - "sha256:424ead8cef3939d690c4b5c85ef5b52155a231ff8b252961b6516ed7cf05f6aa", - "sha256:45702dfbea1420ba3450bb3dd9a80b33f0badd57539c6aac09f42584303e0db6", - "sha256:468858e5cbde0198337e6a2a78eda8c3fb148bdf4c6498eaf4bc9ba3f8e780bd", - "sha256:46895c17ead5e22bea5e576f1db7e41cb273e8d062c04a6a49013d9f60996c25", - "sha256:46e4cc8c43ef6a94885f72512094e482114a8a706d3c555a34ed4b0d20200600", - "sha256:480894aee0b29752560a9de46c0e5f84a82602f2bc5c6cde8db9a345319acfdf", - "sha256:4b293a37dc97e2b1e8a1aec62792d1e52027087c8eea4fc7b5abd2bdafdd6642", - "sha256:4be366220090d7c3424ac2b71c90d1044ea34fca8c0b88f250064fd06087e614", - "sha256:4d1032919280ebc04a80e4fb1e93f7a738129857eaec9448310e638c8bccefcf", - "sha256:4d3b58ab2454b4747442ac76fab66739c72b1e2bb9bd173d7694b9f9dbc9c000", - "sha256:4dcec721fddbb62e60c2801ba08c87010bd6b700054a09998c4d09c08147b8fb", - "sha256:512571ad41bba04eac7268fe33f7f4742210ca26a81fe0c75357fa682636c690", - "sha256:542d77b72786a35563f97069b9379ce762944e67055bea293480f7734b2c7e5e", - "sha256:56ea19c157ed8c74b6be51b5fa1c3aff6e289a041575f0556f66e5fb848bb137", - "sha256:5d3c9210219cbc0f22706f19b154c9a798ff65a6beeafbf77fc9c057ec806f7d", - "sha256:5fea0932358e18293407feb921d4f4457db837b67ec1837f87074667449f9401", - "sha256:617d35dc765715fa86f8c3ccdae1e4229055832c452d4ec20856136acc75053f", - "sha256:64da03cbe93c1e91af133f5bec37fd24d0d4ba2418eaf970d7166b0a26a148a2", - "sha256:65f48245279f9bb301d1276f9679b82e4c080a1ae25e679f682ac62446fac471", - "sha256:6f1d38cbe622039d111b69e9ca945e7e3efebb30ba998867908773183357f3ed", - "sha256:713cd68af9dfbe0bb588e144a61aad8dcc00ef92a82d2e87183ca662d242f524", - "sha256:71845d377c7a47afc6592aacfea738cc8a7e876d586dfba814501d8c53c1ba60", - "sha256:753faf20b3a5906faf1fc50e4ddb8c074cb9b251e00b14c18b28492f933ac8ef", - "sha256:7e74ea87307303ba91073b63e67f2c667e93f05a8c63079ee5b7a5c8d0d7b043", - "sha256:88eea8baf72f0ccf232c22124d122a7f26e8a24110a0273d9bcddcb0f7e1fa03", - "sha256:923816815974425fbb1f1bf57e85eca6e14d8adc313c66db21c094927ad01815", - "sha256:9b6f7d74d094d1f3a4e157278da97752f16ee230080ae331fcc219056ca54f77", - "sha256:a8220fd4c6f98485e97aea65e1df76d4fed1678ede1fe1d0eed2957230d287c4", - "sha256:ab0df0648d86a7ecbd9c632e8f8d6b21bb21b5fc9d9e095c796cacf32a728d2d", - "sha256:ac9b8d5fa4bb7fd2917ab5027f60d4234345fd366fe39aa711d5dca090aa1467", - "sha256:badd1d7283f3e5894779a6ea8944cc765138b96804496c91812b2829f70e18a7", - "sha256:bdc06ad71173b915167702f55d0f3f027fc61abd975bd308a0968c02db4a4c3e", - "sha256:bf0846d629e160223805db9fe8cc7aec16aaa11a07310c50c8c7164efa440aec", - "sha256:bfd309b316228acecfa30670c3887dcedf9b7a44ea39e2101e75d2654522acd4", - 
"sha256:c583229f336682b7212a43d2fa32c30e643d3076178fb9f7a6a14dde85a2d8bd", - "sha256:cb15a2e2a90c8475df45c0949793af1ff413acfb0a716b8b94e488ea95ce7cff", - "sha256:d290eda8f6ada19e1771b54e5706b8f9807e6bb08e873900d5ba114ced13e02c", - "sha256:da3d6adadcf55a93c214d23941aef4abfd45652110aed6580e814152f385b862", - "sha256:dcc7f3162d3711fd5d52e2267e44636e3e566d1e5675a5f0b30e98f2c4af7974", - "sha256:def5663361f6771b18646620fca12968aae730132e104688766cf8a3b1d65922", - "sha256:e5e9f630c73a490b758bf14d859a39f375e6999aea5ddd2e2e9da89b9953486a", - "sha256:e9fde97ecb7bb9c41261c2ce0da10323e9227555c674989f8d9eb7572fc2098d", - "sha256:ef71831bd61fbdb7aa0399d5c4da06bea37107ab5c79ff884cc07f2450910262", - "sha256:f4421ab780c37210a07d138e56dd4b51f8642187cdfb433eb687fe8c11de0144", - "sha256:f6d3655e95a80325b84c4e14c080b2470fe4f33b6846f288379ce36154993fb1", - "sha256:fd4c928ddf6bce586285daa6d90680b9c291cfd045fc40aad34e445d57b1bf51", - "sha256:fe239bdfdae2302e93bd6e8264bd9b71290218fff7084a9db250b55caaccf43f" + "sha256:27592957fedf6e0b62f281e96effd28043345e0e66001f97683aa9a40c667c93", + "sha256:53eb66cd27849eff968ebf8f0bf61f46cdac2da1d1f3576dd4ccee9b25c31993" ], "markers": "python_version >= '3.9'", - "version": "==0.2.15" + "version": "==0.19.1" }, "service-identity": { "hashes": [ @@ -1073,22 +1018,20 @@ }, "tornado": { "hashes": [ - "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c", - "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6", - "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef", - "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4", - "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0", - "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e", - "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882", - "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04", - "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0", - "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af", - "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f", - "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108" + "sha256:192b8f3ea91bd7f1f50c06955416ed76c6b72f96779b962f07f911b91e8d30e9", + "sha256:2c9a876e094109333f888539ddb2de4361743e5d21eece20688e3e351e4990a6", + "sha256:36abed1754faeb80fbd6e64db2758091e1320f6bba74a4cf8c09cd18ccce8aca", + "sha256:3f54aa540bdbfee7b9eb268ead60e7d199de5021facd276819c193c0fb28ea4e", + "sha256:435319e9e340276428bbdb4e7fa732c2d399386d1de5686cb331ec8eee754f07", + "sha256:487dc9cc380e29f58c7ab88f9e27cdeef04b2140862e5076a66fb6bb68bb1bfa", + "sha256:6443a794ba961a9f619b1ae926a2e900ac20c34483eea67be4ed8f1e58d3ef7b", + "sha256:65a7f1d46d4bb41df1ac99f5fcb685fb25c7e61613742d5108b010975a9a6521", + "sha256:dd3eafaaeec1c7f2f8fdcd5f964e8907ad788fe8a5a32c4426fbbdda621223b7", + "sha256:e74c92e8e65086b338fd56333fb9a68b9f6f2fe7ad532645a290a464bcf46be5" ], "index": "pypi", "markers": "python_version >= '3.9'", - "version": "==6.5.2" + "version": "==6.5.5" }, "typing-extensions": { "hashes": [ @@ -1101,36 +1044,36 @@ }, "urwid": { "hashes": [ - "sha256:300804dd568cda5aa1c5b204227bd0cfe7a62cef2d00987c5eb2e4e64294ed9b", - "sha256:ede36ecc99a293bbb4b5e5072c7b7bb943eb3bed17decf89b808209ed2dead15" + "sha256:24be27ffafdb68c09cd95dc21b60ccfd02843320b25ce5feee1708b34fad5a23", + "sha256:f188144261224fdfc9b56b4222869bd0eac90fd7895cf1e376129cdc7e13bc84" ], "markers": "python_full_version 
>= '3.9.0'", - "version": "==3.0.3" + "version": "==3.0.5" }, "wcwidth": { "hashes": [ - "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", - "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1" + "sha256:1a3a1e510b553315f8e146c54764f4fb6264ffad731b3d78088cdb1478ffbdad", + "sha256:cdc4e4262d6ef9a1a57e018384cbeb1208d8abbc64176027e2c2455c81313159" ], - "markers": "python_version >= '3.6'", - "version": "==0.2.14" + "markers": "python_version >= '3.8'", + "version": "==0.6.0" }, "werkzeug": { "hashes": [ - "sha256:2ad50fb9ed09cc3af22c54698351027ace879a0b60a3b5edf5730b2f7d876905", - "sha256:cd3cd98b1b92dc3b7b3995038826c68097dcb16f9baa63abe35f20eafeb9fe5e" + "sha256:210c6bede5a420a913956b4791a7f4d6843a43b6fcee4dfa08a65e93007d0d25", + "sha256:7ddf3357bb9564e407607f988f683d72038551200c704012bb9a4c523d42f131" ], "index": "pypi", "markers": "python_version >= '3.9'", - "version": "==3.1.4" + "version": "==3.1.6" }, "wsproto": { "hashes": [ - "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065", - "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736" + "sha256:61eea322cdf56e8cc904bd3ad7573359a242ba65688716b0710a5eb12beab584", + "sha256:b86885dcf294e15204919950f666e06ffc6c7c114ca900b060d6e16293528294" ], - "markers": "python_full_version >= '3.7.0'", - "version": "==1.2.0" + "markers": "python_version >= '3.10'", + "version": "==1.3.2" }, "zstandard": { "hashes": [ @@ -1242,48 +1185,53 @@ "develop": { "attrs": { "hashes": [ - "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", - "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373" + "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309", + "sha256:d03ceb89cb322a8fd706d4fb91940737b6642aa36998fe130a9bc96c985eff32" ], "markers": "python_version >= '3.9'", - "version": "==25.4.0" + "version": "==26.1.0" }, "black": { "hashes": [ - "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f", - "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd", - "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea", - "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981", - "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b", - "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7", - "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8", - "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175", - "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d", - "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392", - "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad", - "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f", - "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f", - "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b", - "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875", - "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3", - "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800", - "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65", - "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2", - "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812", - 
"sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50", - "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e" + "sha256:0126ae5b7c09957da2bdbd91a9ba1207453feada9e9fe51992848658c6c8e01c", + "sha256:0f76ff19ec5297dd8e66eb64deda23631e642c9393ab592826fd4bdc97a4bce7", + "sha256:28ef38aee69e4b12fda8dba75e21f9b4f979b490c8ac0baa7cb505369ac9e1ff", + "sha256:2bd5aa94fc267d38bb21a70d7410a89f1a1d318841855f698746f8e7f51acd1b", + "sha256:2c50f5063a9641c7eed7795014ba37b0f5fa227f3d408b968936e24bc0566b07", + "sha256:2d6bfaf7fd0993b420bed691f20f9492d53ce9a2bcccea4b797d34e947318a78", + "sha256:41cd2012d35b47d589cb8a16faf8a32ef7a336f56356babd9fcf70939ad1897f", + "sha256:474c27574d6d7037c1bc875a81d9be0a9a4f9ee95e62800dab3cfaadbf75acd5", + "sha256:5602bdb96d52d2d0672f24f6ffe5218795736dd34807fd0fd55ccd6bf206168b", + "sha256:5e9d0d86df21f2e1677cc4bd090cd0e446278bcbbe49bf3659c308c3e402843e", + "sha256:5ed0ca58586c8d9a487352a96b15272b7fa55d139fc8496b519e78023a8dab0a", + "sha256:6c54a4a82e291a1fee5137371ab488866b7c86a3305af4026bdd4dc78642e1ac", + "sha256:6e131579c243c98f35bce64a7e08e87fb2d610544754675d4a0e73a070a5aa3a", + "sha256:855822d90f884905362f602880ed8b5df1b7e3ee7d0db2502d4388a954cc8c54", + "sha256:86a8b5035fce64f5dcd1b794cf8ec4d31fe458cf6ce3986a30deb434df82a1d2", + "sha256:8a33d657f3276328ce00e4d37fe70361e1ec7614da5d7b6e78de5426cb56332f", + "sha256:92c0ec1f2cc149551a2b7b47efc32c866406b6891b0ee4625e95967c8f4acfb1", + "sha256:9a5e9f45e5d5e1c5b5c29b3bd4265dcc90e8b92cf4534520896ed77f791f4da5", + "sha256:afc622538b430aa4c8c853f7f63bc582b3b8030fd8c80b70fb5fa5b834e575c2", + "sha256:b07fc0dab849d24a80a29cfab8d8a19187d1c4685d8a5e6385a5ce323c1f015f", + "sha256:b5e6f89631eb88a7302d416594a32faeee9fb8fb848290da9d0a5f2903519fc1", + "sha256:bf9bf162ed91a26f1adba8efda0b573bc6924ec1408a52cc6f82cb73ec2b142c", + "sha256:c7e72339f841b5a237ff14f7d3880ddd0fc7f98a1199e8c4327f9a4f478c1839", + "sha256:ddb113db38838eb9f043623ba274cfaf7d51d5b0c22ecb30afe58b1bb8322983", + "sha256:dfdd51fc3e64ea4f35873d1b3fb25326773d55d2329ff8449139ebaad7357efb", + "sha256:f1cd08e99d2f9317292a311dfe578fd2a24b15dbce97792f9c4d752275c1fa56", + "sha256:f89f2ab047c76a9c03f78d0d66ca519e389519902fa27e7a91117ef7611c0568" ], "index": "pypi", - "markers": "python_version >= '3.9'", - "version": "==24.10.0" + "markers": "python_version >= '3.10'", + "version": "==26.3.1" }, "click": { "hashes": [ - "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", - "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6" + "sha256:14162b8b3b3550a7d479eafa77dfd3c38d9dc8951f6f69c78913a8f9a7540fd5", + "sha256:1924d2c27c5653561cd2cae4548d1406039cb79b858b747cfea24924bbc1616d" ], "markers": "python_version >= '3.10'", - "version": "==8.3.1" + "version": "==8.3.2" }, "flake8": { "hashes": [ @@ -1305,12 +1253,12 @@ }, "isort": { "hashes": [ - "sha256:1bcabac8bc3c36c7fb7b98a76c8abb18e0f841a3ba81decac7691008592499c1", - "sha256:5513527951aadb3ac4292a41a16cbc50dd1642432f5e8c20057d414bdafb4187" + "sha256:171ac4ff559cdc060bcfff550bc8404a486fee0caab245679c2abe7cb253c78d", + "sha256:28b89bc70f751b559aeca209e6120393d43fbe2490de0559662be7a9787e3d75" ], "index": "pypi", "markers": "python_full_version >= '3.10.0'", - "version": "==7.0.0" + "version": "==8.0.1" }, "mccabe": { "hashes": [ @@ -1330,27 +1278,27 @@ }, "packaging": { "hashes": [ - "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", - "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f" + 
"sha256:5d9c0669c6285e491e0ced2eee587eaf67b670d94a19e94e3984a481aba6802f", + "sha256:f042152b681c4bfac5cae2742a55e103d27ab2ec0f3d88037136b6bfe7c9c5de" ], "markers": "python_version >= '3.8'", - "version": "==25.0" + "version": "==26.1" }, "pathspec": { "hashes": [ - "sha256:62f8558917908d237d399b9b338ef455a814801a4688bc41074b25feefd93472", - "sha256:fa32b1eb775ed9ba8d599b22c5f906dc098113989da2c00bf8b210078ca7fb92" + "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", + "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723" ], "markers": "python_version >= '3.9'", - "version": "==1.0.2" + "version": "==1.0.4" }, "platformdirs": { "hashes": [ - "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", - "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31" + "sha256:3bfa75b0ad0db84096ae777218481852c0ebc6c727b3168c1b9e0118e458cf0a", + "sha256:e61adb1d5e5cb3441b4b7710bea7e4c12250ca49439228cc1021c00dcfac0917" ], "markers": "python_version >= '3.10'", - "version": "==4.5.1" + "version": "==4.9.6" }, "pycodestyle": { "hashes": [ @@ -1367,6 +1315,54 @@ ], "markers": "python_version >= '3.9'", "version": "==3.4.0" + }, + "pytokens": { + "hashes": [ + "sha256:0fc71786e629cef478cbf29d7ea1923299181d0699dbe7c3c0f4a583811d9fc1", + "sha256:11edda0942da80ff58c4408407616a310adecae1ddd22eef8c692fe266fa5009", + "sha256:140709331e846b728475786df8aeb27d24f48cbcf7bcd449f8de75cae7a45083", + "sha256:24afde1f53d95348b5a0eb19488661147285ca4dd7ed752bbc3e1c6242a304d1", + "sha256:26cef14744a8385f35d0e095dc8b3a7583f6c953c2e3d269c7f82484bf5ad2de", + "sha256:27b83ad28825978742beef057bfe406ad6ed524b2d28c252c5de7b4a6dd48fa2", + "sha256:292052fe80923aae2260c073f822ceba21f3872ced9a68bb7953b348e561179a", + "sha256:29d1d8fb1030af4d231789959f21821ab6325e463f0503a61d204343c9b355d1", + "sha256:2a44ed93ea23415c54f3face3b65ef2b844d96aeb3455b8a69b3df6beab6acc5", + "sha256:30f51edd9bb7f85c748979384165601d028b84f7bd13fe14d3e065304093916a", + "sha256:34bcc734bd2f2d5fe3b34e7b3c0116bfb2397f2d9666139988e7a3eb5f7400e3", + "sha256:3ad72b851e781478366288743198101e5eb34a414f1d5627cdd585ca3b25f1db", + "sha256:3f901fe783e06e48e8cbdc82d631fca8f118333798193e026a50ce1b3757ea68", + "sha256:42f144f3aafa5d92bad964d471a581651e28b24434d184871bd02e3a0d956037", + "sha256:4a14d5f5fc78ce85e426aa159489e2d5961acf0e47575e08f35584009178e321", + "sha256:4a58d057208cb9075c144950d789511220b07636dd2e4708d5645d24de666bdc", + "sha256:4e691d7f5186bd2842c14813f79f8884bb03f5995f0575272009982c5ac6c0f7", + "sha256:5502408cab1cb18e128570f8d598981c68a50d0cbd7c61312a90507cd3a1276f", + "sha256:584c80c24b078eec1e227079d56dc22ff755e0ba8654d8383b2c549107528918", + "sha256:5ad948d085ed6c16413eb5fec6b3e02fa00dc29a2534f088d3302c47eb59adf9", + "sha256:670d286910b531c7b7e3c0b453fd8156f250adb140146d234a82219459b9640c", + "sha256:682fa37ff4d8e95f7df6fe6fe6a431e8ed8e788023c6bcc0f0880a12eab80ad1", + "sha256:6d6c4268598f762bc8e91f5dbf2ab2f61f7b95bdc07953b602db879b3c8c18e1", + "sha256:79fc6b8699564e1f9b521582c35435f1bd32dd06822322ec44afdeba666d8cb3", + "sha256:8bdb9d0ce90cbf99c525e75a2fa415144fd570a1ba987380190e8b786bc6ef9b", + "sha256:8fcb9ba3709ff77e77f1c7022ff11d13553f3c30299a9fe246a166903e9091eb", + "sha256:941d4343bf27b605e9213b26bfa1c4bf197c9c599a9627eb7305b0defcfe40c1", + "sha256:967cf6e3fd4adf7de8fc73cd3043754ae79c36475c1c11d514fc72cf5490094a", + "sha256:970b08dd6b86058b6dc07efe9e98414f5102974716232d10f32ff39701e841c4", + "sha256:97f50fd18543be72da51dd505e2ed20d2228c74e0464e4262e4899797803d7fa", + 
"sha256:9bd7d7f544d362576be74f9d5901a22f317efc20046efe2034dced238cbbfe78", + "sha256:add8bf86b71a5d9fb5b89f023a80b791e04fba57960aa790cc6125f7f1d39dfe", + "sha256:b35d7e5ad269804f6697727702da3c517bb8a5228afa450ab0fa787732055fc9", + "sha256:b49750419d300e2b5a3813cf229d4e5a4c728dae470bcc89867a9ad6f25a722d", + "sha256:d31b97b3de0f61571a124a00ffe9a81fb9939146c122c11060725bd5aea79975", + "sha256:d70e77c55ae8380c91c0c18dea05951482e263982911fc7410b1ffd1dadd3440", + "sha256:d9907d61f15bf7261d7e775bd5d7ee4d2930e04424bab1972591918497623a16", + "sha256:da5baeaf7116dced9c6bb76dc31ba04a2dc3695f3d9f74741d7910122b456edc", + "sha256:dc74c035f9bfca0255c1af77ddd2d6ae8419012805453e4b0e7513e17904545d", + "sha256:dcafc12c30dbaf1e2af0490978352e0c4041a7cde31f4f81435c2a5e8b9cabb6", + "sha256:ee44d0f85b803321710f9239f335aafe16553b39106384cef8e6de40cb4ef2f6", + "sha256:f66a6bbe741bd431f6d741e617e0f39ec7257ca1f89089593479347cc4d13324" + ], + "markers": "python_version >= '3.8'", + "version": "==0.4.1" } } } diff --git a/src/test/regress/citus_tests/arbitrary_configs/citus_arbitrary_configs.py b/src/test/regress/citus_tests/arbitrary_configs/citus_arbitrary_configs.py index 52924aa11af..26e32d27c6a 100755 --- a/src/test/regress/citus_tests/arbitrary_configs/citus_arbitrary_configs.py +++ b/src/test/regress/citus_tests/arbitrary_configs/citus_arbitrary_configs.py @@ -12,6 +12,7 @@ --seed= random number seed --base whether to use the base sql schedule or not """ + import concurrent.futures import multiprocessing import os diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py index 9e4d8b75e14..825db9739bc 100644 --- a/src/test/regress/citus_tests/common.py +++ b/src/test/regress/citus_tests/common.py @@ -1129,15 +1129,11 @@ def wait_for_catchup(self, subscription_name, mode="replay", target_lsn=None): # Before release 12 walreceiver just set the application name to # "walreceiver" - self.poll_query_until( - sql.SQL( - """ + self.poll_query_until(sql.SQL(""" SELECT {} <= {} AND state = 'streaming' FROM pg_catalog.pg_stat_replication WHERE application_name IN ({}, 'walreceiver') - """ - ).format(target_lsn, sql.Identifier(f"{mode}_lsn"), subscription_name) - ) + """).format(target_lsn, sql.Identifier(f"{mode}_lsn"), subscription_name)) @contextmanager def _enable_firewall(self): diff --git a/src/test/regress/citus_tests/config.py b/src/test/regress/citus_tests/config.py index 65953f4b345..46fe96641b0 100644 --- a/src/test/regress/citus_tests/config.py +++ b/src/test/regress/citus_tests/config.py @@ -49,7 +49,7 @@ MASTER = "master" # This should be updated when citus version changes -MASTER_VERSION = "14.0" +MASTER_VERSION = "14.1" HOME = expanduser("~") diff --git a/src/test/regress/citus_tests/query_generator/data_gen.py b/src/test/regress/citus_tests/query_generator/data_gen.py index 96f7a136619..c99ccf452e1 100644 --- a/src/test/regress/citus_tests/query_generator/data_gen.py +++ b/src/test/regress/citus_tests/query_generator/data_gen.py @@ -7,7 +7,7 @@ def getTableData(): dataGenerationSql = "" tableIdx = 1 - (fromVal, toVal) = getConfig().dataRange + fromVal, toVal = getConfig().dataRange tables = getConfig().targetTables for table in tables: # generate base rows diff --git a/src/test/regress/citus_tests/query_generator/query_gen.py b/src/test/regress/citus_tests/query_generator/query_gen.py index e25525d29e2..196acbee11b 100644 --- a/src/test/regress/citus_tests/query_generator/query_gen.py +++ b/src/test/regress/citus_tests/query_generator/query_gen.py @@ -236,7 +236,7 @@ def 
_genLimit(genCtx): # 'LIMIT' 'random()' query = "" query += " LIMIT " - (fromVal, toVal) = getConfig().limitRange + fromVal, toVal = getConfig().limitRange query += str(random.randint(fromVal, toVal)) return query @@ -282,7 +282,7 @@ def _genRestrictExpr(genCtx): or shouldSelectThatBranch() ): query += randomRestrictOp() - (fromVal, toVal) = getConfig().filterRange + fromVal, toVal = getConfig().filterRange query += str(random.randint(fromVal, toVal)) else: if shouldSelectThatBranch(): @@ -429,6 +429,6 @@ def _genCteRte(genCtx): def _genValuesRte(genCtx): # '( VALUES(random()) )' query = "" - (fromVal, toVal) = getConfig().dataRange + fromVal, toVal = getConfig().dataRange query += " ( VALUES(" + str(random.randint(fromVal, toVal)) + " ) ) " return query diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index d38dce9a8ef..8f3806c3778 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -127,6 +127,7 @@ def extra_tests(self): ), "multi_extension": TestDeps(None, repeatable=False), "multi_test_helpers": TestDeps(None), + "multi_test_catalog_views": TestDeps(None), "multi_insert_select": TestDeps("base_schedule"), "multi_partitioning": TestDeps("base_schedule"), "multi_mx_create_table": TestDeps( @@ -164,6 +165,9 @@ def extra_tests(self): "multi_mx_schema_support": TestDeps(None, ["multi_mx_copy_data"]), "multi_simple_queries": TestDeps("base_schedule"), "create_single_shard_table": TestDeps("minimal_schedule"), + "isolation_schema_based_sharding_from_any_node": TestDeps( + None, ["isolation_setup"] + ), "isolation_extension_commands": TestDeps( None, ["isolation_setup", "isolation_add_remove_node"] ), @@ -186,6 +190,14 @@ def extra_tests(self): # because it queries node group id and it changes as we add / remove nodes repeatable=False, ), + "multi_mx_metadata": TestDeps( + None, + [ + "multi_cluster_management", + "remove_coordinator_from_metadata", + "multi_test_catalog_views", + ], + ), "multi_mx_add_coordinator": TestDeps( None, [ diff --git a/src/test/regress/citus_tests/test/test_columnar.py b/src/test/regress/citus_tests/test/test_columnar.py index 568432f0ba3..07113a52da9 100644 --- a/src/test/regress/citus_tests/test/test_columnar.py +++ b/src/test/regress/citus_tests/test/test_columnar.py @@ -22,22 +22,18 @@ def test_freezing(coord): for _ in range(0, 10_000): cur.execute("UPDATE test_row SET i = i + 1") - frozen_age = coord.sql_value( - """ + frozen_age = coord.sql_value(""" select age(relfrozenxid) from pg_class where relname='test_columnar_freeze'; - """ - ) + """) assert frozen_age > 70_000, "columnar table was frozen" coord.sql("VACUUM FREEZE test_columnar_freeze") - frozen_age = coord.sql_value( - """ + frozen_age = coord.sql_value(""" select age(relfrozenxid) from pg_class where relname='test_columnar_freeze'; - """ - ) + """) assert frozen_age < 70_000, "columnar table was not frozen" coord.sql("DROP EXTENSION citus_columnar CASCADE") diff --git a/src/test/regress/citus_tests/test/test_prepared_statements.py b/src/test/regress/citus_tests/test/test_prepared_statements.py index 761ecc30ce9..68a81bb039a 100644 --- a/src/test/regress/citus_tests/test/test_prepared_statements.py +++ b/src/test/regress/citus_tests/test/test_prepared_statements.py @@ -4,14 +4,12 @@ def test_call_param(cluster): # distribution key. 
coord = cluster.coordinator coord.sql("CREATE TABLE test(i int)") - coord.sql( - """ + coord.sql(""" CREATE PROCEDURE p(_i INT) LANGUAGE plpgsql AS $$ BEGIN INSERT INTO test(i) VALUES (_i); END; $$ - """ - ) + """) sql = "CALL p(%s)" # prepare/exec before distributing diff --git a/src/test/regress/expected/alter_table_add_column.out b/src/test/regress/expected/alter_table_add_column.out index 0408aeeab97..b911d6bd3de 100644 --- a/src/test/regress/expected/alter_table_add_column.out +++ b/src/test/regress/expected/alter_table_add_column.out @@ -87,10 +87,10 @@ SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes( ) JOIN pg_dist_node USING (nodeid) ORDER BY is_coordinator DESC, result; - is_coordinator | result + is_coordinator | result --------------------------------------------------------------------- t | [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}] - f | [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}] + f | [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}] f | [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}] (3 rows) @@ -102,8 +102,8 @@ ORDER BY is_coordinator DESC, result; is_coordinator | result --------------------------------------------------------------------- t | {"relnames": ["alter_table_add_column.referencing"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, "compression_method": ""}, {"not_null": false, 
"type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": "default", "compression_method": ""}]} + f | {"relnames": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_1830001"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": "default", "compression_method": ""}]} f | {"relnames": ["alter_table_add_column.referencing"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": "default", "compression_method": ""}]} - f | {"relnames": ["alter_table_add_column.referencing_1830001", "alter_table_add_column.referencing"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, 
"compression_method": ""}, {"not_null": false, "type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": "default", "compression_method": ""}]} (3 rows) SET client_min_messages TO WARNING; diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index c2418e9ab22..7c0e72df8d9 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -157,27 +157,6 @@ WHERE logicalrelid='t2'::regclass; f (1 row) --- Failure to set groupid in the worker -SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").cancel(' || :pid || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -- Failure to delete pg_dist_node entries from the worker SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").cancel(' || :pid || ')'); mitmproxy diff --git a/src/test/regress/expected/isolation_schema_based_sharding_from_any_node.out b/src/test/regress/expected/isolation_schema_based_sharding_from_any_node.out new file mode 100644 index 00000000000..6f2becacc8d --- /dev/null +++ b/src/test/regress/expected/isolation_schema_based_sharding_from_any_node.out @@ -0,0 +1,796 @@ +Parsed test spec with 3 sessions + +starting permutation: coord-create-table-ref coord-create-reference-table-ref coord-remove-worker-57638 coord-begin coord-add-worker-57638 worker-57637-start worker-57637-shard-replication-factor worker-57637-create-table-sc1-t2 coord-commit coord-query-ref-placements coord-query-sc1-t2-placement worker-57637-shard-replication-factor worker-57637-create-table-sc1-t1 worker-57637-stop coord-query-ref-placements coord-query-sc1-t1-placement +step coord-create-table-ref: CREATE TABLE ref (id int PRIMARY KEY); +step coord-create-reference-table-ref: SELECT create_reference_table('ref'); +create_reference_table +--------------------------------------------------------------------- + +(1 row) + +step coord-remove-worker-57638: SELECT 1 FROM citus_remove_node('localhost', 57638); +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step coord-begin: BEGIN; +step coord-add-worker-57638: SELECT 1 FROM citus_add_node('localhost', 57638); +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t2: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t2 (a int)'); +step coord-commit: COMMIT; +step worker-57637-create-table-sc1-t2: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-query-ref-placements: SELECT nodeport, success, result FROM run_command_on_placements('ref', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57636|t | 0 + 57637|t | 0 +(2 rows) + +step coord-query-sc1-t2-placement: SELECT nodeport, success, result FROM run_command_on_placements('sc1.t2', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57638|t | 0 +(1 row) + +step worker-57637-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t1 (a int)'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-query-ref-placements: SELECT nodeport, success, result FROM run_command_on_placements('ref', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57636|t | 0 + 57637|t | 0 + 57638|t | 0 +(3 rows) + +step coord-query-sc1-t1-placement: SELECT nodeport, success, result FROM run_command_on_placements('sc1.t1', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57638|t | 0 +(1 row) + +?column? 
+--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: coord-shard-replication-factor coord-create-table-sc1-t1 coord-query-sc1-t1-placement coord-begin coord-move-shard-sc1-t1 worker-57637-start worker-57637-shard-replication-factor worker-57637-create-table-sc1-t2 coord-commit worker-57637-stop coord-query-sc1-t1-placement +step coord-shard-replication-factor: SET citus.shard_replication_factor TO 1; +step coord-create-table-sc1-t1: CREATE TABLE sc1.t1 (a int); +step coord-query-sc1-t1-placement: SELECT nodeport, success, result FROM run_command_on_placements('sc1.t1', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57638|t | 0 +(1 row) + +step coord-begin: BEGIN; +step coord-move-shard-sc1-t1: + SELECT citus_move_shard_placement( + s.shardid, + src.nodename, src.nodeport, + dst.nodename, dst.nodeport, + 'block_writes' + ) + FROM pg_dist_shard s + JOIN pg_dist_shard_placement src USING (shardid) + CROSS JOIN ( + SELECT nodename, nodeport + FROM pg_dist_node + WHERE noderole = 'primary' AND isactive AND shouldhaveshards AND + (nodename, nodeport) NOT IN ( + SELECT p.nodename, p.nodeport + FROM pg_dist_shard_placement p + JOIN pg_dist_shard ps USING (shardid) + WHERE ps.logicalrelid = 'sc1.t1'::regclass + ) + ORDER BY nodeport + LIMIT 1 + ) dst + WHERE s.logicalrelid = 'sc1.t1'::regclass; + +citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t2: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t2 (a int)'); +ERROR: could not acquire the lock required to colocate distributed table sc1.t1 +step coord-commit: COMMIT; +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-query-sc1-t1-placement: SELECT nodeport, success, result FROM run_command_on_placements('sc1.t1', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 0 +(1 row) + +?column? 
+--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: coord-shard-replication-factor coord-create-table-sc1-t1 worker-57638-start worker-57638-begin worker-57638-drop-table-sc1-t1 worker-57637-start worker-57637-shard-replication-factor worker-57637-create-table-sc1-t2 worker-57638-commit worker-57638-stop worker-57637-stop coord-show-tables-in-schema-sc1 +step coord-shard-replication-factor: SET citus.shard_replication_factor TO 1; +step coord-create-table-sc1-t1: CREATE TABLE sc1.t1 (a int); +step worker-57638-start: SELECT start_session_level_connection_to_node('localhost', 57638); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-begin: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-drop-table-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('DROP TABLE sc1.t1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t2: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t2 (a int)'); +step worker-57638-commit: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t2: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-show-tables-in-schema-sc1: + SELECT nodeport, success, result + FROM run_command_on_all_nodes($$ + SELECT array_agg(tablename ORDER BY tablename) FROM pg_tables WHERE schemaname = 'sc1' AND tablename IN ('t1', 't2', 't1_renamed') + $$) + JOIN pg_dist_node USING (nodeid) + ORDER BY nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57636|t |{t2} + 57637|t |{t2} + 57638|t |{t2} +(3 rows) + +?column? 
+--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: coord-shard-replication-factor coord-create-table-sc1-t1 coord-query-sc1-t1-placement worker-57637-start worker-57637-shard-replication-factor worker-57637-begin worker-57637-create-table-sc1-t2 coord-move-shard-sc1-t1 worker-57637-commit worker-57637-stop coord-query-sc1-t1-placement +step coord-shard-replication-factor: SET citus.shard_replication_factor TO 1; +step coord-create-table-sc1-t1: CREATE TABLE sc1.t1 (a int); +step coord-query-sc1-t1-placement: SELECT nodeport, success, result FROM run_command_on_placements('sc1.t1', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57638|t | 0 +(1 row) + +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-begin: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t2: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t2 (a int)'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-move-shard-sc1-t1: + SELECT citus_move_shard_placement( + s.shardid, + src.nodename, src.nodeport, + dst.nodename, dst.nodeport, + 'block_writes' + ) + FROM pg_dist_shard s + JOIN pg_dist_shard_placement src USING (shardid) + CROSS JOIN ( + SELECT nodename, nodeport + FROM pg_dist_node + WHERE noderole = 'primary' AND isactive AND shouldhaveshards AND + (nodename, nodeport) NOT IN ( + SELECT p.nodename, p.nodeport + FROM pg_dist_shard_placement p + JOIN pg_dist_shard ps USING (shardid) + WHERE ps.logicalrelid = 'sc1.t1'::regclass + ) + ORDER BY nodeport + LIMIT 1 + ) dst + WHERE s.logicalrelid = 'sc1.t1'::regclass; + +ERROR: could not acquire the lock required to move sc1.t1 +step worker-57637-commit: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-query-sc1-t1-placement: SELECT nodeport, success, result FROM run_command_on_placements('sc1.t1', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57638|t | 0 +(1 row) + +?column? 
+--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: coord-shard-replication-factor coord-create-table-sc1-t1 worker-57637-start worker-57637-shard-replication-factor worker-57638-start worker-57637-begin worker-57637-create-table-sc1-t2 worker-57638-drop-table-sc1-t1 worker-57637-commit worker-57638-stop worker-57637-stop coord-show-tables-in-schema-sc1 +step coord-shard-replication-factor: SET citus.shard_replication_factor TO 1; +step coord-create-table-sc1-t1: CREATE TABLE sc1.t1 (a int); +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-start: SELECT start_session_level_connection_to_node('localhost', 57638); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-begin: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t2: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t2 (a int)'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-drop-table-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('DROP TABLE sc1.t1'); +step worker-57637-commit: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-drop-table-sc1-t1: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-show-tables-in-schema-sc1: + SELECT nodeport, success, result + FROM run_command_on_all_nodes($$ + SELECT array_agg(tablename ORDER BY tablename) FROM pg_tables WHERE schemaname = 'sc1' AND tablename IN ('t1', 't2', 't1_renamed') + $$) + JOIN pg_dist_node USING (nodeid) + ORDER BY nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57636|t |{t2} + 57637|t |{t2} + 57638|t |{t2} +(3 rows) + +?column? 
+--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: worker-57637-start worker-57637-shard-replication-factor worker-57637-begin worker-57637-create-table-sc1-t1 worker-57638-start worker-57638-shard-replication-factor worker-57638-create-table-sc1-t2 worker-57637-commit worker-57637-stop worker-57638-commit worker-57638-stop coord-show-tables-in-schema-sc1 +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-begin: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t1 (a int)'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-start: SELECT start_session_level_connection_to_node('localhost', 57638); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-create-table-sc1-t2: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t2 (a int)'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-commit: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-commit: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-show-tables-in-schema-sc1: + SELECT nodeport, success, result + FROM run_command_on_all_nodes($$ + SELECT array_agg(tablename ORDER BY tablename) FROM pg_tables WHERE schemaname = 'sc1' AND tablename IN ('t1', 't2', 't1_renamed') + $$) + JOIN pg_dist_node USING (nodeid) + ORDER BY nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57636|t |{t1,t2} + 57637|t |{t1,t2} + 57638|t |{t1,t2} +(3 rows) + +?column? 
+--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: coord-remove-worker-57638 coord-create-table-ref coord-create-reference-table-ref coord-add-worker-57638 worker-57637-start worker-57637-begin worker-57637-shard-replication-factor worker-57637-create-table-sc1-t1 coord-begin coord-shard-replication-factor coord-create-table-sc1-t2 worker-57637-commit worker-57637-stop coord-commit coord-query-ref-placements coord-query-sc1-t1-placement coord-query-sc1-t2-placement +step coord-remove-worker-57638: SELECT 1 FROM citus_remove_node('localhost', 57638); +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step coord-create-table-ref: CREATE TABLE ref (id int PRIMARY KEY); +step coord-create-reference-table-ref: SELECT create_reference_table('ref'); +create_reference_table +--------------------------------------------------------------------- + +(1 row) + +step coord-add-worker-57638: SELECT 1 FROM citus_add_node('localhost', 57638); +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-begin: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t1 (a int)'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-begin: BEGIN; +step coord-shard-replication-factor: SET citus.shard_replication_factor TO 1; +step coord-create-table-sc1-t2: CREATE TABLE sc1.t2 (a int); +step worker-57637-commit: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-commit: COMMIT; +step coord-query-ref-placements: SELECT nodeport, success, result FROM run_command_on_placements('ref', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57636|t | 0 + 57637|t | 0 + 57638|t | 0 +(3 rows) + +step coord-query-sc1-t1-placement: SELECT nodeport, success, result FROM run_command_on_placements('sc1.t1', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result +--------------------------------------------------------------------- + 57638|t | 0 +(1 row) + +step coord-query-sc1-t2-placement: SELECT nodeport, success, result FROM run_command_on_placements('sc1.t2', 'SELECT count(*) FROM %s') ORDER BY nodeport; +nodeport|success|result 
+--------------------------------------------------------------------- + 57638|t | 0 +(1 row) + +?column? +--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: worker-57637-start worker-57637-shard-replication-factor worker-57637-begin worker-57637-create-table-sc1-t1 worker-57638-start worker-57638-shard-replication-factor worker-57638-create-table-sc1-t1 worker-57637-commit worker-57637-stop worker-57638-stop coord-show-tables-in-schema-sc1 +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-begin: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-create-table-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t1 (a int)'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-start: SELECT start_session_level_connection_to_node('localhost', 57638); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-shard-replication-factor: SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-create-table-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t1 (a int)'); +step worker-57637-commit: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-create-table-sc1-t1: <... completed> +ERROR: duplicate key value violates unique constraint "pg_type_typname_nsp_index" +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-show-tables-in-schema-sc1: + SELECT nodeport, success, result + FROM run_command_on_all_nodes($$ + SELECT array_agg(tablename ORDER BY tablename) FROM pg_tables WHERE schemaname = 'sc1' AND tablename IN ('t1', 't2', 't1_renamed') + $$) + JOIN pg_dist_node USING (nodeid) + ORDER BY nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57636|t |{t1} + 57637|t |{t1} + 57638|t |{t1} +(3 rows) + +?column? 
+--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: coord-shard-replication-factor coord-create-table-sc1-t1 worker-57637-start worker-57637-begin worker-57637-drop-table-sc1-t1 worker-57638-start worker-57638-drop-table-sc1-t1 worker-57637-commit worker-57637-stop worker-57638-stop coord-show-tables-in-schema-sc1 +step coord-shard-replication-factor: SET citus.shard_replication_factor TO 1; +step coord-create-table-sc1-t1: CREATE TABLE sc1.t1 (a int); +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-begin: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-drop-table-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('DROP TABLE sc1.t1'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-start: SELECT start_session_level_connection_to_node('localhost', 57638); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-drop-table-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('DROP TABLE sc1.t1'); +step worker-57637-commit: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-drop-table-sc1-t1: <... completed> +ERROR: table "t1" does not exist +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-show-tables-in-schema-sc1: + SELECT nodeport, success, result + FROM run_command_on_all_nodes($$ + SELECT array_agg(tablename ORDER BY tablename) FROM pg_tables WHERE schemaname = 'sc1' AND tablename IN ('t1', 't2', 't1_renamed') + $$) + JOIN pg_dist_node USING (nodeid) + ORDER BY nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57636|t | + 57637|t | + 57638|t | +(3 rows) + +?column? 
+--------------------------------------------------------------------- +t +(1 row) + + +starting permutation: coord-shard-replication-factor coord-create-table-sc1-t1 worker-57637-start worker-57637-begin worker-57637-alter-table-rename-sc1-t1 worker-57638-start worker-57638-alter-table-rename-sc1-t1 worker-57637-commit worker-57637-stop worker-57638-stop coord-show-tables-in-schema-sc1 +step coord-shard-replication-factor: SET citus.shard_replication_factor TO 1; +step coord-create-table-sc1-t1: CREATE TABLE sc1.t1 (a int); +step worker-57637-start: SELECT start_session_level_connection_to_node('localhost', 57637); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-begin: SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57637-alter-table-rename-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('ALTER TABLE sc1.t1 RENAME TO t1_renamed'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-start: SELECT start_session_level_connection_to_node('localhost', 57638); +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-alter-table-rename-sc1-t1: SELECT run_commands_on_session_level_connection_to_node('ALTER TABLE sc1.t1 RENAME TO t1_renamed'); +step worker-57637-commit: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-alter-table-rename-sc1-t1: <... completed> +ERROR: relation "sc1.t1" does not exist +step worker-57637-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step worker-57638-stop: SELECT stop_session_level_connection_to_node(); +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coord-show-tables-in-schema-sc1: + SELECT nodeport, success, result + FROM run_command_on_all_nodes($$ + SELECT array_agg(tablename ORDER BY tablename) FROM pg_tables WHERE schemaname = 'sc1' AND tablename IN ('t1', 't2', 't1_renamed') + $$) + JOIN pg_dist_node USING (nodeid) + ORDER BY nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57636|t |{t1_renamed} + 57637|t |{t1_renamed} + 57638|t |{t1_renamed} +(3 rows) + +?column? +--------------------------------------------------------------------- +t +(1 row) + diff --git a/src/test/regress/expected/issue_6592.out b/src/test/regress/expected/issue_6592.out index f9b8a632e02..4d586d5252c 100644 --- a/src/test/regress/expected/issue_6592.out +++ b/src/test/regress/expected/issue_6592.out @@ -1,4 +1,17 @@ -- https://github.com/citusdata/citus/issues/6592 +-- first, make sure to remove the coordinator if it was already added +SET client_min_messages to ERROR; +SELECT COUNT(*)>=0 FROM ( + SELECT master_remove_node(nodename, nodeport) + FROM pg_dist_node + WHERE nodename = 'localhost' AND nodeport = :master_port +) q; + ?column? 
+--------------------------------------------------------------------- + t +(1 row) + +RESET client_min_messages; SET citus.next_shard_id TO 180000; CREATE TABLE ref_table_to_be_dropped_6592 (key int); SELECT create_reference_table('ref_table_to_be_dropped_6592'); @@ -10,7 +23,7 @@ SELECT create_reference_table('ref_table_to_be_dropped_6592'); CREATE TABLE ref_table_oid AS SELECT oid FROM pg_class WHERE relname = 'ref_table_to_be_dropped_6592'; SET citus.enable_ddl_propagation TO OFF; DROP TABLE ref_table_to_be_dropped_6592 CASCADE; -- citus_drop_all_shards doesn't drop shards and metadata --- ensure that coordinator is added to pg_dist_node +-- add the coordinator to pg_dist_node SET client_min_messages to ERROR; SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); ?column? @@ -51,3 +64,76 @@ SELECT 1 FROM citus_remove_node('localhost', :master_port); 1 (1 row) +-- test the same when creating a distributed-schema table from a worker +SET citus.next_shard_id TO 180100; +CREATE TABLE other_ref_table_to_be_dropped (key int); +SELECT create_reference_table('other_ref_table_to_be_dropped'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE other_ref_table_oid AS SELECT oid FROM pg_class WHERE relname = 'other_ref_table_to_be_dropped'; +SET citus.enable_ddl_propagation TO OFF; +DROP TABLE other_ref_table_to_be_dropped CASCADE; -- citus_drop_all_shards doesn't drop shards and metadata +RESET citus.enable_ddl_propagation; +-- add the coordinator to pg_dist_node +SET client_min_messages to ERROR; +SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +-- As we always grab the next shard id from the coordinator, we need to alter +-- the sequence on the coordinator, so we first store it. Since the connection +-- that we internally use to get the next shard id from the coordinator might +-- change, we cannot just set citus.next_shard_id on the coordinator because +-- doing so wouldn't affect the further connections to the coordinator. 
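The distinction the comment above draws is the crux of the pattern used next: a session-level GUC binds only the backend that set it, while a sequence restart is visible to every new connection, including the internal ones Citus opens on the coordinator. A minimal illustration of the two behaviors, assuming a superuser session on the coordinator (illustrative only, not part of the test):

    -- visible only to this session; a later internal connection opened on
    -- behalf of a worker would not see it
    SET citus.next_shard_id TO 180150;
    -- visible to all sessions, including Citus-internal connections that
    -- call master_get_new_shardid() on the coordinator
    ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 180150;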
+SELECT last_value::bigint INTO pg_dist_shardid_seq_prev_state FROM pg_catalog.pg_dist_shardid_seq; +ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 180150; +\c - - - :worker_1_port +SET citus.enable_schema_based_sharding TO ON; +CREATE SCHEMA s1; +RESET citus.enable_schema_based_sharding; +-- errors out for the dropped reference table +SET citus.shard_replication_factor TO 1; +CREATE TABLE s1.t1 (a int); +ERROR: relation with OID XXXX does not exist +CONTEXT: while executing command on localhost:xxxxx +\c - - - :worker_1_port +SET citus.enable_ddl_propagation TO OFF; +DELETE FROM pg_dist_partition WHERE logicalrelid = 'other_ref_table_to_be_dropped'::regclass; +DELETE FROM pg_dist_placement WHERE shardid = 180100; +DELETE FROM pg_dist_shard WHERE shardid = 180100; +DROP TABLE IF EXISTS other_ref_table_to_be_dropped; +DROP TABLE IF EXISTS other_ref_table_to_be_dropped_180100; +\c - - - :worker_2_port +SET citus.enable_ddl_propagation TO OFF; +DELETE FROM pg_dist_partition WHERE logicalrelid = 'other_ref_table_to_be_dropped'::regclass; +DELETE FROM pg_dist_placement WHERE shardid = 180100; +DELETE FROM pg_dist_shard WHERE shardid = 180100; +DROP TABLE IF EXISTS other_ref_table_to_be_dropped; +DROP TABLE IF EXISTS other_ref_table_to_be_dropped_180100; +\c - - - :master_port +DELETE FROM pg_dist_placement WHERE shardid = 180100; +DELETE FROM pg_dist_shard WHERE shardid = 180100; +DELETE FROM pg_dist_partition WHERE logicalrelid IN (SELECT oid FROM other_ref_table_oid); +DROP TABLE other_ref_table_oid; +DROP SCHEMA s1; +SELECT 1 FROM citus_remove_node('localhost', :master_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- reset pg_dist_shardid_seq on the coordinator +DO $proc$ +DECLARE + v_last_value bigint; +BEGIN + SELECT last_value INTO v_last_value FROM pg_dist_shardid_seq_prev_state; + EXECUTE format('ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH %s', v_last_value); +END$proc$; +DROP TABLE pg_dist_shardid_seq_prev_state; diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index 44228162220..3523b11887b 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -1268,6 +1268,8 @@ BEGIN; (1 row) DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated; + -- we're not interested in what we send to the nodes we're removing + SET LOCAL citus.log_remote_commands TO OFF; SELECT 1 FROM citus_remove_node('localhost', :worker_1_port); ?column? 
--------------------------------------------------------------------- @@ -1280,6 +1282,7 @@ BEGIN; 1 (1 row) + SET LOCAL citus.log_remote_commands TO ON; SELECT 1 FROM citus_add_node('localhost', :worker_1_port); NOTICE: issuing SELECT metadata ->> 'server_id' AS server_id FROM pg_dist_node_metadata DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 34851053b73..d8125b77074 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1675,12 +1675,33 @@ SELECT * FROM multi_extension.print_extension_changes(); | function worker_binary_partial_agg_ffunc(internal) bytea (6 rows) +-- Test downgrade to 14.0-1 from 14.1-1 +ALTER EXTENSION citus UPDATE TO '14.1-1'; +ALTER EXTENSION citus UPDATE TO '14.0-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 14.1-1 +ALTER EXTENSION citus UPDATE TO '14.1-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | function citus_internal.acquire_placement_colocation_lock(bigint,integer) integer + | function citus_internal.adjust_identity_column_seq_settings(regclass,bigint,boolean) void + | function citus_internal.get_next_colocation_id() bigint + | function citus_internal.lock_colocation_id(integer,integer) void + | function worker_apply_sequence_command(text,regtype,bigint,boolean) void +(5 rows) + DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version SHOW citus.version; citus.version --------------------------------------------------------------------- - 14.0.1 + 14.1.0 (1 row) -- ensure no unexpected objects were created outside pg_catalog @@ -1715,7 +1736,7 @@ DROP EXTENSION citus; DROP EXTENSION citus_columnar; CREATE EXTENSION citus VERSION '8.0-1'; ERROR: specified version incompatible with loaded Citus library -DETAIL: Loaded library requires 14.0, but 8.0-1 was specified. +DETAIL: Loaded library requires 14.1, but 8.0-1 was specified. HINT: If a newer library is present, restart the database and try the command again. -- Test non-distributed queries work even in version mismatch SET citus.enable_version_checks TO 'false'; @@ -1760,7 +1781,7 @@ ORDER BY 1; -- We should not distribute table in version mistmatch SELECT create_distributed_table('version_mismatch_table', 'column1'); ERROR: loaded Citus library version differs from installed extension version -DETAIL: Loaded library requires 14.0, but the installed extension version is 8.1-1. +DETAIL: Loaded library requires 14.1, but the installed extension version is 8.1-1. HINT: Run ALTER EXTENSION citus UPDATE and try again. 
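For context on the upgrade/downgrade no-op check in the hunk above: print_extension_changes() is a helper defined earlier in this test file, and conceptually it snapshots the extension's member objects and diffs consecutive snapshots. A hedged sketch of the same idea using only stock catalogs (this is not the helper's actual implementation; the temp table name is made up):

    CREATE TEMP TABLE objs_before AS
      SELECT pg_describe_object(classid, objid, 0) AS description
      FROM pg_depend
      WHERE refclassid = 'pg_extension'::regclass
        AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'citus')
        AND deptype = 'e';
    ALTER EXTENSION citus UPDATE TO '14.1-1';
    ALTER EXTENSION citus UPDATE TO '14.0-1';
    -- an empty result here (and for the symmetric EXCEPT) means the
    -- upgrade+downgrade cycle was a true no-op
    SELECT pg_describe_object(classid, objid, 0)
    FROM pg_depend
    WHERE refclassid = 'pg_extension'::regclass
      AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'citus')
      AND deptype = 'e'
    EXCEPT
    SELECT description FROM objs_before;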
-- This function will cause fail in next ALTER EXTENSION CREATE OR REPLACE FUNCTION pg_catalog.relation_is_a_known_shard(regclass) diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 470fe41cc52..7695b18f1a8 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -2126,18 +2126,7 @@ DROP TABLE mx_test_schema_1.mx_table_1 CASCADE; DROP TABLE mx_testing_schema.mx_test_table; DROP TABLE mx_ref; DROP TABLE dist_table_1, dist_table_2; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO off; -- for enterprise CREATE USER non_super_metadata_user; -SET citus.enable_ddl_propagation TO on; -RESET client_min_messages; -SELECT run_command_on_workers('CREATE USER non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - GRANT EXECUTE ON FUNCTION start_metadata_sync_to_node(text,int) TO non_super_metadata_user; GRANT EXECUTE ON FUNCTION stop_metadata_sync_to_node(text,int,bool) TO non_super_metadata_user; GRANT ALL ON pg_dist_node TO non_super_metadata_user; diff --git a/src/test/regress/expected/multi_mx_metadata.out b/src/test/regress/expected/multi_mx_metadata.out index 28274b0cc76..a69b8ff13ed 100644 --- a/src/test/regress/expected/multi_mx_metadata.out +++ b/src/test/regress/expected/multi_mx_metadata.out @@ -245,10 +245,10 @@ SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and s 0 (1 row) --- make sure that citus_drop_all_shards does not work from the worker nodes +-- make sure that citus_drop_all_shards does not work from the worker nodes when the coordinator is not in the metadata SELECT citus_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts'); -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); recover_prepared_transactions @@ -354,11 +354,11 @@ $$); (1 row) SELECT citus_drop_all_shards('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname SELECT master_remove_partition_metadata('distributed_mx_table'::regclass, 'public', 'distributed_mx_table'); -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. 
+ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname -- make sure that we can drop unrelated tables/sequences CREATE TABLE unrelated_table(key serial); DROP TABLE unrelated_table; @@ -381,3 +381,9 @@ SELECT pg_reload_conf(); t (1 row) +SET client_min_messages TO WARNING; +DROP SCHEMA citus_mx_schema_for_xacts CASCADE; +DROP TABLE distributed_mx_table CASCADE; +DROP USER no_access_mx; +\c - postgres - :worker_1_port +DROP TABLE should_commit CASCADE; diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out index 4e61d85d8bf..f3649d7b3a2 100644 --- a/src/test/regress/expected/multi_mx_schema_support.out +++ b/src/test/regress/expected/multi_mx_schema_support.out @@ -487,9 +487,16 @@ SELECT table_schema AS "Shards' Schema" (1 row) -- Show that altering distributed schema is not allowed on worker nodes +-- when the coordinator is not in the metadata. +SELECT COUNT(*)=0 FROM pg_dist_node WHERE groupid = 0; -- verify that the coordinator is not in the metadata + ?column? +--------------------------------------------------------------------- + t +(1 row) + ALTER SCHEMA mx_old_schema RENAME TO temp_mx_old_schema; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname \c - - - :master_port ALTER TABLE mx_old_schema.table_set_schema SET SCHEMA mx_new_schema; SELECT objid::oid::regnamespace::text as "Distributed Schemas" diff --git a/src/test/regress/expected/multi_test_catalog_views.out b/src/test/regress/expected/multi_test_catalog_views.out index 65ca8f63779..630a48e6b44 100644 --- a/src/test/regress/expected/multi_test_catalog_views.out +++ b/src/test/regress/expected/multi_test_catalog_views.out @@ -1,19 +1,14 @@ --- create a temporary custom version of this function that's normally defined --- in multi_test_helpers, so that this file can be run parallel with --- multi_test_helpers during the minimal schedules -CREATE OR REPLACE FUNCTION run_command_on_master_and_workers_temp(p_sql text) -RETURNS void LANGUAGE plpgsql AS $$ -BEGIN - EXECUTE p_sql; - PERFORM run_command_on_workers(p_sql); -END;$$; -- The following views are intended as alternatives to \d commands, whose -- output changed in PostgreSQL 10. In particular, they must be used any time -- a test wishes to print out the structure of a relation, which previously -- was safely accomplished by a \d invocation. -SELECT run_command_on_master_and_workers_temp( -$desc_views$ -CREATE VIEW table_fkey_cols AS +-- +-- As we propagate CREATE VIEW commands when the view doesn't depend on an +-- un-distributable dependency, all below views are implicitly propagated as +-- they only depend on catalog objects, which are created by initdb for each +-- node separately and so are not assumed to be un-distributable by +-- GetUndistributableDependency(). 
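Because the view definitions that follow now rely on Citus implicitly propagating CREATE VIEW (rather than on the dropped run_command_on_master_and_workers_temp() wrapper), one way to double-check that a view actually reached the workers is a catalog probe along these lines (illustrative only, not part of the test):

    SELECT run_command_on_workers($$
      SELECT count(*) FROM pg_views WHERE viewname = 'table_fkeys'
    $$);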
+CREATE OR REPLACE VIEW table_fkey_cols AS SELECT rc.constraint_name AS "name", kcu.column_name AS "column_name", uc_kcu.column_name AS "refd_column_name", @@ -27,8 +22,7 @@ WHERE rc.constraint_schema = kcu.constraint_schema AND rc.constraint_name = kcu.constraint_name AND rc.unique_constraint_schema = uc_kcu.constraint_schema AND rc.unique_constraint_name = uc_kcu.constraint_name; - -CREATE VIEW table_fkeys AS +CREATE OR REPLACE VIEW table_fkeys AS SELECT name AS "Constraint", format('FOREIGN KEY (%s) REFERENCES %s(%s)', string_agg(DISTINCT quote_ident(column_name), ', '), @@ -37,8 +31,7 @@ SELECT name AS "Constraint", "relid" FROM table_fkey_cols GROUP BY (name, relid); - -CREATE VIEW table_attrs AS +CREATE OR REPLACE VIEW table_attrs AS SELECT c.column_name AS "name", c.data_type AS "type", CASE @@ -53,8 +46,7 @@ SELECT c.column_name AS "name", format('%I.%I', c.table_schema, c.table_name)::regclass::oid AS "relid" FROM information_schema.columns AS c ORDER BY ordinal_position; - -CREATE VIEW table_desc AS +CREATE OR REPLACE VIEW table_desc AS SELECT "name" AS "Column", "type" || "modifier" AS "Type", rtrim(( @@ -69,7 +61,6 @@ SELECT "name" AS "Column", )) AS "Modifiers", "relid" FROM table_attrs; - CREATE OR REPLACE VIEW table_checks AS SELECT c.conname AS "Constraint", @@ -83,8 +74,7 @@ WHERE c.contype <> 'n' -- drop NOT NULL AND c.conbin IS NOT NULL -- only things with an expression (i.e., CHECKs) AND c.conrelid <> 0 -- table-level (exclude domain checks) ORDER BY "Constraint", "Definition"; - -CREATE VIEW index_attrs AS +CREATE OR REPLACE VIEW index_attrs AS WITH indexoid AS ( SELECT c.oid, n.nspname, @@ -107,12 +97,3 @@ WHERE true AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attrelid, a.attnum; - -$desc_views$ -); - run_command_on_master_and_workers_temp ---------------------------------------------------------------------- - -(1 row) - -DROP FUNCTION run_command_on_master_and_workers_temp(p_sql text); diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index be46238b124..7f987a54d79 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -192,9 +192,9 @@ RETURNS jsonb AS $func$ EXECUTE format( $$ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.constraint_names) AS fkeys_with_different_config FROM ( - SELECT array_agg(constraint_name ORDER BY constraint_oid) AS constraint_names, - array_agg(referencing_table::regclass::text ORDER BY constraint_oid) AS referencing_tables, - array_agg(referenced_table::regclass::text ORDER BY constraint_oid) AS referenced_tables, + SELECT array_agg(constraint_name ORDER BY constraint_name) AS constraint_names, + array_agg(referencing_table::regclass::text ORDER BY constraint_name) AS referencing_tables, + array_agg(referenced_table::regclass::text ORDER BY constraint_name) AS referenced_tables, referencing_columns, referenced_columns, deferable, deferred, on_update, on_delete, match_type, referencing_columns_set_null_or_default FROM ( SELECT @@ -239,8 +239,8 @@ RETURNS jsonb AS $func$ EXECUTE format( $$ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.indexnames) AS index_defs FROM ( - SELECT array_agg(indexname ORDER BY indexrelid) AS indexnames, - array_agg(indexdef ORDER BY indexrelid) AS indexdefs + SELECT array_agg(indexname ORDER BY indexrelid::regclass::text) AS indexnames, + array_agg(indexdef ORDER BY indexrelid::regclass::text) AS indexdefs FROM pg_indexes JOIN pg_index ON (indexrelid = (schemaname || '.' 
|| indexname)::regclass) @@ -266,7 +266,7 @@ RETURNS jsonb AS $func$ SELECT column_name, column_default::text, generation_expression::text FROM information_schema.columns WHERE table_schema = '%1$s' AND table_name = '%2$s' AND - column_default IS NOT NULL OR generation_expression IS NOT NULL + (column_default IS NOT NULL OR generation_expression IS NOT NULL) ) q1 $$, schemaname, tablename) INTO result; @@ -282,7 +282,7 @@ RETURNS jsonb AS $func$ $$ SELECT to_jsonb(q2.*) FROM ( SELECT relnames, jsonb_agg(to_jsonb(q1.*) - 'relnames' ORDER BY q1.column_name) AS column_attrs FROM ( - SELECT array_agg(attrelid::regclass::text ORDER BY attrelid) AS relnames, + SELECT array_agg(attrelid::regclass::text ORDER BY attrelid::regclass::text) AS relnames, attname AS column_name, typname AS type_name, collname AS collation_name, attcompression AS compression_method, attnotnull AS not_null FROM pg_attribute pa LEFT JOIN pg_type pt ON (pa.atttypid = pt.oid) diff --git a/src/test/regress/expected/multi_unsupported_worker_operations.out b/src/test/regress/expected/multi_unsupported_worker_operations.out index 665ba4e5ec9..dedf144a678 100644 --- a/src/test/regress/expected/multi_unsupported_worker_operations.out +++ b/src/test/regress/expected/multi_unsupported_worker_operations.out @@ -116,15 +116,20 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_tabl \d mx_test_index -- citus_drop_all_shards -SELECT citus_drop_all_shards('mx_table'::regclass, 'public', 'mx_table'); -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass; +BEGIN; + SELECT citus_drop_all_shards('mx_table'::regclass, 'public', 'mx_table'); + citus_drop_all_shards +--------------------------------------------------------------------- + 11 +(1 row) + + SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass; count --------------------------------------------------------------------- - 11 + 0 (1 row) +ROLLBACK; -- master_add_inactive_node SELECT 1 FROM master_add_inactive_node('localhost', 5432); ERROR: operation is not allowed on this node @@ -247,18 +252,26 @@ SELECT count(*) FROM mx_table; (1 row) -- master_drop_distributed_table_metadata -SELECT master_remove_distributed_table_metadata_from_workers('mx_table'::regclass, 'public', 'mx_table'); -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -SELECT master_remove_partition_metadata('mx_table'::regclass, 'public', 'mx_table'); -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. 
-SELECT count(*) FROM mx_table; +BEGIN; + SELECT master_remove_distributed_table_metadata_from_workers('mx_table'::regclass, 'public', 'mx_table'); + master_remove_distributed_table_metadata_from_workers +--------------------------------------------------------------------- + +(1 row) + + SELECT master_remove_partition_metadata('mx_table'::regclass, 'public', 'mx_table'); + master_remove_partition_metadata +--------------------------------------------------------------------- + +(1 row) + + SELECT count(*) FROM mx_table; count --------------------------------------------------------------------- - 5 + 0 (1 row) +ROLLBACK; -- citus_copy_shard_placement SELECT logicalrelid, shardid AS testshardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement @@ -308,3 +321,10 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port); (1 row) RESET citus.shard_replication_factor; +-- start metadata sync to node again to make the test re-runnable +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/pg18.out b/src/test/regress/expected/pg18.out index 0866b936c23..28034124a5c 100644 --- a/src/test/regress/expected/pg18.out +++ b/src/test/regress/expected/pg18.out @@ -3016,8 +3016,8 @@ SET citus.explain_all_tasks TO default; SELECT * FROM pg_get_loaded_modules() WHERE file_name LIKE 'citus%' ORDER BY module_name; module_name | version | file_name --------------------------------------------------------------------- - citus | 14.0.0 | citus.so - citus_columnar | 14.0.0 | citus_columnar.so + citus | 14.1.0 | citus.so + citus_columnar | 14.1.0 | citus_columnar.so (2 rows) -- ============================================================ diff --git a/src/test/regress/expected/schema_based_sharding.out b/src/test/regress/expected/schema_based_sharding.out index 711c3914137..8b247c8a398 100644 --- a/src/test/regress/expected/schema_based_sharding.out +++ b/src/test/regress/expected/schema_based_sharding.out @@ -1483,16 +1483,6 @@ REVOKE CREATE ON DATABASE regression FROM test_non_super_user; REVOKE CREATE ON SCHEMA public FROM test_non_super_user; DROP ROLE test_non_super_user; \c - - - :worker_1_port --- test creating a tenant table from workers -CREATE TABLE tenant_3.tbl_1(a int, b text); -ERROR: cannot create tables in a distributed schema from a worker node -HINT: Connect to the coordinator node and try again. --- test creating a tenant schema from workers -SET citus.enable_schema_based_sharding TO ON; -CREATE SCHEMA worker_tenant_schema; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -SET citus.enable_schema_based_sharding TO OFF; -- Enable the GUC on workers to make sure that the CREATE SCHEMA/ TABLE -- commands that we send to workers don't recursively try creating a -- tenant schema / table. @@ -1511,15 +1501,13 @@ SELECT pg_reload_conf(); t (1 row) --- Verify that citus_internal.unregister_tenant_schema_globally is a no-op --- on workers. +-- Verify that citus_internal.unregister_tenant_schema_globally can be called +-- from workers too, but it will fail for this case as we didn't yet drop the +-- schema. 
SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3');
- unregister_tenant_schema_globally
---------------------------------------------------------------------
-
-(1 row)
-
+ERROR: schema is expected to be already dropped because this function is only expected to be called from Citus drop hook
 \c - - - :master_port
+SET client_min_messages TO WARNING;
 SET search_path TO regular_schema;
 SET citus.next_shard_id TO 1950000;
 SET citus.shard_count TO 32;
diff --git a/src/test/regress/expected/schema_based_sharding_from_workers_a.out b/src/test/regress/expected/schema_based_sharding_from_workers_a.out
new file mode 100644
index 00000000000..91dbaffec72
--- /dev/null
+++ b/src/test/regress/expected/schema_based_sharding_from_workers_a.out
@@ -0,0 +1,1983 @@
+-- This is heavily based on the schema_based_sharding.sql test file.
+-- The only differences are:
+-- - we don't check some of the functionality tested there (e.g., testing of some of the internal UDFs)
+-- - we test schema-based sharding features (e.g., DDLs, queries, etc.) using the same SQL from workers this time
+-- - when we verify things, we always make sure to do so on all nodes to ensure that we consistently sync
+-- metadata changes when a command is issued from the workers too
+SET client_min_messages TO WARNING;
+SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SET client_min_messages TO NOTICE;
+SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+-- When creating a tenant table from workers, we always fetch the next shard id
+-- and placement id from the coordinator because we never sync those sequences to
+-- workers. For this reason, throughout this test file, we always set the next shard id
+-- on the coordinator when needed, rather than setting it on the current worker node.
+--
+-- Note that setting citus.next_shard_id on the coordinator would not work if the
+-- citus internal connection we use to execute master_get_new_shardid() on the
+-- coordinator changes because the underlying function, GetNextShardIdInternal(),
+-- just increments NextShardId for the current session. For this reason, we instead
+-- set pg_dist_shardid_seq on the coordinator in the tests where we test creating
+-- distributed tables from a worker and where we want to use consistent shard ids.
+--
+-- At the end of the test file, we reset pg_dist_shardid_seq.
+SELECT last_value::bigint INTO pg_dist_shardid_seq_prev_state FROM pg_catalog.pg_dist_shardid_seq; +\c - - - :worker_1_port +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050000;$$); + result +--------------------------------------------------------------------- + ALTER SEQUENCE +(1 row) + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +SET citus.enable_schema_based_sharding TO ON; +-- empty tenant +CREATE SCHEMA "tenant\'_1"; +CREATE SCHEMA IF NOT EXISTS "tenant\'_1"; +NOTICE: schema "tenant\'_1" already exists, skipping +-- non-empty tenant +CREATE SCHEMA "tenant\'_2"; +CREATE TABLE "tenant\'_2".test_table(a int, b text); +-- empty tenant +CREATE SCHEMA "tenant\'_3"; +CREATE TABLE "tenant\'_3".test_table(a int, b text); +DROP TABLE "tenant\'_3".test_table; +\c - - - :master_port +-- add a node after creating tenant schemas +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050100;$$); + result +--------------------------------------------------------------------- + ALTER SEQUENCE +(1 row) + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +CREATE SCHEMA regular_schema; +SET search_path TO regular_schema; +-- Verify that citus_internal.unregister_tenant_schema_globally can only +-- be called on schemas that are dropped already. +SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); +ERROR: schema is expected to be already dropped because this function is only expected to be called from Citus drop hook +-- show that regular_schema doesn't show up in pg_dist_schema +SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'regular_schema'; + ?column? 
+--------------------------------------------------------------------- + t +(1 row) + +CREATE TABLE regular_schema.citus_local_tbl(id int); +SELECT citus_add_local_table_to_metadata('regular_schema.citus_local_tbl'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.hash_dist_tbl(id int); +SELECT create_distributed_table('regular_schema.hash_dist_tbl', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.ref_tbl(id int PRIMARY KEY); +SELECT create_reference_table('regular_schema.ref_tbl'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.ref_tbl_1(id int PRIMARY KEY); +SELECT create_reference_table('regular_schema.ref_tbl_1'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.pg_local_tbl3(id int REFERENCES regular_schema.ref_tbl_1(id)); +CREATE TABLE regular_schema.citus_local_partitioned_table(a int, b text) PARTITION BY RANGE (a); +SELECT citus_add_local_table_to_metadata('regular_schema.citus_local_partitioned_table'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.dist_partitioned_table(a int, b text) PARTITION BY RANGE (a); +SELECT create_distributed_table('regular_schema.dist_partitioned_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.parent_attach_test_citus_local(a int, b text) PARTITION BY RANGE (a); +SELECT citus_add_local_table_to_metadata('regular_schema.parent_attach_test_citus_local'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.parent_attach_test_dist(a int, b text) PARTITION BY RANGE (a); +SELECT create_distributed_table('regular_schema.parent_attach_test_dist', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.child_attach_test_citus_local(a int, b text); +SELECT citus_add_local_table_to_metadata('regular_schema.child_attach_test_citus_local'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.child_attach_test_dist(a int, b text); +SELECT create_distributed_table('regular_schema.child_attach_test_dist', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.citus_local(a int, b text); +SELECT citus_add_local_table_to_metadata('regular_schema.citus_local'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE regular_schema.dist(a int, b text); +SELECT create_distributed_table('regular_schema.dist', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TYPE regular_schema.employee_type AS (name text, salary numeric); +CREATE TABLE regular_schema.reference_table(a int PRIMARY KEY); +SELECT create_reference_table('regular_schema.reference_table'); + create_reference_table 
+--------------------------------------------------------------------- + +(1 row) + +CREATE FUNCTION regular_schema.increment_one() +RETURNS void +LANGUAGE plpgsql +AS $$ +BEGIN + UPDATE search_path_test SET a = a + 1; +END; +$$; +CREATE FUNCTION regular_schema.decrement_one() +RETURNS void +LANGUAGE plpgsql +AS $$ +BEGIN + UPDATE search_path_test SET a = a - 1; +END; +$$; +CREATE SCHEMA regular_schema_1; +CREATE TABLE regular_schema_1.dist_table(a int, b text); +SELECT create_distributed_table('regular_schema_1.dist_table', 'a', shard_count => 4); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\c - - - :worker_1_port +SET citus.enable_schema_based_sharding TO ON; +SET client_min_messages TO NOTICE; +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050300;$$); + result +--------------------------------------------------------------------- + ALTER SEQUENCE +(1 row) + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +ALTER SCHEMA "tenant\'_1" RENAME TO tenant_1; +ALTER SCHEMA "tenant\'_2" RENAME TO tenant_2; +ALTER SCHEMA "tenant\'_3" RENAME TO tenant_3; +-- verify we cannot set tenant table's schema to regular schema from workers +CREATE TABLE tenant_2.test_table2(id int); +ALTER TABLE tenant_2.test_table2 SET SCHEMA regular_schema; +ERROR: moving distributed schema tables to another schema from workers is not supported yet +-- verify we can set regular table's schema to distributed schema +CREATE TABLE regular_schema.test_table3(id int); +ALTER TABLE regular_schema.test_table3 SET SCHEMA tenant_2; +NOTICE: Moving test_table3 into distributed schema tenant_2 +-- verify that tenant_2.test_table3 is recorded in pg_dist_partition as a single-shard table. +SELECT result FROM run_command_on_all_nodes($$ +SELECT COUNT(*)=1 FROM pg_dist_partition +WHERE logicalrelid = 'tenant_2.test_table3'::regclass AND + partmethod = 'n' AND repmodel = 's' AND colocationid > 0; +$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +-- verify that regular_schema.test_table3 does not exist +SELECT * FROM regular_schema.test_table3; +ERROR: relation "regular_schema.test_table3" does not exist +-- verify we cannot set tenant table's schema to another distributed schema from workers +CREATE TABLE tenant_2.test_table4(id int); +ALTER TABLE tenant_2.test_table4 SET SCHEMA tenant_3; +ERROR: moving distributed schema tables to another schema from workers is not supported yet +-- verify that we can put a local table in regular schema into distributed schema +CREATE TABLE regular_schema.pg_local_tbl(id int); +ALTER TABLE regular_schema.pg_local_tbl SET SCHEMA tenant_2; +NOTICE: Moving pg_local_tbl into distributed schema tenant_2 +-- verify that we can put a Citus local table in regular schema into distributed schema +ALTER TABLE regular_schema.citus_local_tbl SET SCHEMA tenant_2; +NOTICE: Moving citus_local_tbl into distributed schema tenant_2 +-- verify that we do not allow a hash distributed table in regular schema into distributed schema +ALTER TABLE regular_schema.hash_dist_tbl SET SCHEMA tenant_2; +ERROR: distributed schema cannot have distributed tables +HINT: Undistribute distributed tables before 'ALTER TABLE SET SCHEMA'. 
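The hint above also describes the escape hatch: a hash-distributed table must first be converted back to a regular table before it can be moved into a distributed schema. A sketch of that path, assuming nothing else (such as a foreign key) blocks the move:

    SELECT undistribute_table('regular_schema.hash_dist_tbl');
    ALTER TABLE regular_schema.hash_dist_tbl SET SCHEMA tenant_2;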
+-- verify that we do not allow a reference table in regular schema into distributed schema +ALTER TABLE regular_schema.ref_tbl SET SCHEMA tenant_2; +ERROR: distributed schema cannot have distributed tables +HINT: Undistribute distributed tables before 'ALTER TABLE SET SCHEMA'. +-- verify that we cannot put a table in tenant schema into regular schema +CREATE TABLE tenant_2.tenant_tbl(id int); +ALTER TABLE tenant_2.tenant_tbl SET SCHEMA regular_schema; +ERROR: moving distributed schema tables to another schema from workers is not supported yet +-- verify that we cannot put a table in tenant schema into another tenant schema +CREATE TABLE tenant_2.tenant_tbl2(id int); +ALTER TABLE tenant_2.tenant_tbl2 SET SCHEMA tenant_3; +ERROR: moving distributed schema tables to another schema from workers is not supported yet +-- verify that we do not allow a local table in regular schema into distributed schema if it has foreign key to a non-reference table in another schema +CREATE TABLE regular_schema.pg_local_tbl1(id int PRIMARY KEY); +CREATE TABLE regular_schema.pg_local_tbl2(id int REFERENCES regular_schema.pg_local_tbl1(id)); +ALTER TABLE regular_schema.pg_local_tbl2 SET SCHEMA tenant_2; +ERROR: foreign keys from distributed schemas can only point to the same distributed schema or reference tables in regular schemas +DETAIL: "tenant_2.pg_local_tbl2" references "regular_schema.pg_local_tbl1" via foreign key constraint "pg_local_tbl2_id_fkey" +-- verify that we allow a local table in regular schema into distributed schema if it has foreign key to a reference table in another schema +ALTER TABLE regular_schema.pg_local_tbl3 SET SCHEMA tenant_2; +NOTICE: Moving pg_local_tbl3 into distributed schema tenant_2 +-- verify that we do not allow a table in tenant schema into regular schema if it has foreign key to/from another table in the same schema +DROP TABLE tenant_2.tenant_tbl2; +CREATE TABLE tenant_2.tenant_tbl1(id int PRIMARY KEY); +CREATE TABLE tenant_2.tenant_tbl2(id int REFERENCES tenant_2.tenant_tbl1(id)); +ALTER TABLE tenant_2.tenant_tbl1 SET SCHEMA regular_schema; +ERROR: set schema is not allowed for table tenant_tbl1 in distributed schema tenant_2 +DETAIL: distributed schemas cannot have foreign keys from/to local tables or different schema +ALTER TABLE tenant_2.tenant_tbl2 SET SCHEMA regular_schema; +ERROR: set schema is not allowed for table tenant_tbl2 in distributed schema tenant_2 +DETAIL: distributed schemas cannot have foreign keys from/to local tables or different schema +-- verify that we do not allow a table in distributed schema into another distributed schema if it has foreign key to/from another table in the same schema +CREATE TABLE tenant_2.tenant_tbl3(id int PRIMARY KEY); +CREATE TABLE tenant_2.tenant_tbl4(id int REFERENCES tenant_2.tenant_tbl3(id)); +ALTER TABLE tenant_2.tenant_tbl3 SET SCHEMA tenant_3; +ERROR: set schema is not allowed for table tenant_tbl3 in distributed schema tenant_2 +DETAIL: distributed schemas cannot have foreign keys from/to local tables or different schema +ALTER TABLE tenant_2.tenant_tbl4 SET SCHEMA tenant_3; +ERROR: set schema is not allowed for table tenant_tbl4 in distributed schema tenant_2 +DETAIL: distributed schemas cannot have foreign keys from/to local tables or different schema +-- alter set non-existent schema +ALTER TABLE tenant_2.test_table SET SCHEMA ghost_schema; +ERROR: schema "ghost_schema" does not exist +ALTER TABLE IF EXISTS tenant_2.test_table SET SCHEMA ghost_schema; +ERROR: schema "ghost_schema" does not exist +-- alter set 
non-existent table +ALTER TABLE tenant_2.ghost_table SET SCHEMA ghost_schema; +ERROR: relation "tenant_2.ghost_table" does not exist +ALTER TABLE IF EXISTS tenant_2.ghost_table SET SCHEMA ghost_schema; +NOTICE: relation "ghost_table" does not exist, skipping +-- verify that colocation id is set for empty tenants too +SELECT result FROM run_command_on_all_nodes($$ + SELECT array_agg(colocationid > 0) FROM pg_dist_schema + WHERE schemaid::regnamespace::text IN ('tenant_1', 'tenant_3'); +$$); + result +--------------------------------------------------------------------- + {t,t} + {t,t} + {t,t} +(3 rows) + +-- Verify that tenant_2.test_table is recorded in pg_dist_partition as a +-- single-shard table. +SELECT result FROM run_command_on_all_nodes($$ +SELECT COUNT(*)=1 FROM pg_dist_partition +WHERE logicalrelid = 'tenant_2.test_table'::regclass AND + partmethod = 'n' AND repmodel = 's' AND colocationid > 0; +$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +-- verify that colocation id is properly set for non-empty tenant schema +SELECT result FROM run_command_on_all_nodes($$ + SELECT colocationid = ( + SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_2.test_table'::regclass + ) + FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'tenant_2'; +$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +-- create a tenant table for tenant_1 after add_node +CREATE TABLE tenant_1.test_table(a int, b text); +-- verify that colocation id is properly set for now-non-empty tenant schema +SELECT result FROM run_command_on_all_nodes($$ + SELECT colocationid = ( + SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_1.test_table'::regclass + ) + FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'tenant_1'; +$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +-- verify that tenant_1 and tenant_2 have different colocation ids +SELECT result FROM run_command_on_all_nodes($$ +SELECT COUNT(DISTINCT(colocationid))=2 FROM pg_dist_schema +WHERE schemaid::regnamespace::text IN ('tenant_1', 'tenant_2'); +$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +-- verify that we don't allow creating tenant tables via CREATE SCHEMA command +CREATE SCHEMA schema_using_schema_elements CREATE TABLE test_table(a int, b text); +ERROR: cannot create distributed schema and table in a single statement +HINT: SET citus.enable_schema_based_sharding TO off, or create the schema and table in separate commands. 
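Per the hint above, either route works where the single statement does not (a sketch of the hint's two options; the second schema name is made up for illustration):

    -- option 1: keep the GUC on, but create the schema and the table separately
    CREATE SCHEMA schema_using_schema_elements;
    CREATE TABLE schema_using_schema_elements.test_table(a int, b text);
    -- option 2: turn the GUC off, in which case the combined form creates a
    -- regular (non-distributed) schema
    SET citus.enable_schema_based_sharding TO off;
    CREATE SCHEMA schema_using_schema_elements_2 CREATE TABLE test_table(a int, b text);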
+CREATE SCHEMA tenant_4;
+CREATE TABLE tenant_4.tbl_1(a int, b text);
+CREATE TABLE tenant_4.tbl_2(a int, b text);
+-- verify that we don't allow creating a foreign table in a tenant schema, with a nice error message
+CREATE FOREIGN TABLE tenant_4.foreign_table (
+ id bigint not null,
+ full_name text not null default ''
+) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true', table_name 'foreign_table');
+ERROR: cannot create a foreign table in a distributed schema
+-- verify that we don't allow creating a temporary table in a tenant schema
+CREATE TEMPORARY TABLE tenant_4.temp_table (a int, b text);
+ERROR: cannot create temporary relation in non-temporary schema
+CREATE TABLE tenant_4.partitioned_table(a int, b text, PRIMARY KEY (a)) PARTITION BY RANGE (a);
+CREATE TABLE tenant_4.partitioned_table_child_1 PARTITION OF tenant_4.partitioned_table FOR VALUES FROM (1) TO (2);
+CREATE TABLE tenant_4.another_partitioned_table(a int, b text, FOREIGN KEY (a) REFERENCES tenant_4.partitioned_table(a)) PARTITION BY RANGE (a);
+CREATE TABLE tenant_4.another_partitioned_table_child PARTITION OF tenant_4.another_partitioned_table FOR VALUES FROM (1) TO (2);
+-- verify that we allow creating partitioned tables in a tenant schema
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition
+WHERE logicalrelid = 'tenant_4.partitioned_table_child_1'::regclass AND
+ partmethod = 'n' AND repmodel = 's' AND colocationid = (
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'tenant_4.partitioned_table'::regclass);
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT EXISTS(
+ SELECT 1
+ FROM pg_inherits
+ WHERE inhrelid = 'tenant_4.partitioned_table_child_1'::regclass AND
+ inhparent = 'tenant_4.partitioned_table'::regclass
+) AS is_partition;
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition
+WHERE logicalrelid = 'tenant_4.another_partitioned_table_child'::regclass AND
+ partmethod = 'n' AND repmodel = 's' AND colocationid = (
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'tenant_4.another_partitioned_table'::regclass);
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT EXISTS(
+ SELECT 1
+ FROM pg_inherits
+ WHERE inhrelid = 'tenant_4.another_partitioned_table_child'::regclass AND
+ inhparent = 'tenant_4.another_partitioned_table'::regclass
+) AS is_partition;
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- verify the foreign key between parents
+SELECT result FROM run_command_on_all_nodes($$
+SELECT EXISTS(
+ SELECT 1
+ FROM pg_constraint
+ WHERE conrelid = 'tenant_4.another_partitioned_table'::regclass AND
+ confrelid = 'tenant_4.partitioned_table'::regclass AND
+ contype = 'f'
+) AS foreign_key_exists;
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- We want to hide the error message context because the node reporting the foreign key
+-- violation might change from one run to another.
+\set VERBOSITY terse
+INSERT INTO tenant_4.another_partitioned_table VALUES (1, 'a');
+ERROR: insert or update on table "another_partitioned_table_child_2050316" violates foreign key constraint "another_partitioned_table_a_fkey_2050315"
+\set VERBOSITY default
+INSERT INTO tenant_4.partitioned_table VALUES (1, 'a');
+INSERT INTO tenant_4.another_partitioned_table VALUES (1, 'a');
+CREATE SCHEMA tenant_5;
+CREATE TABLE tenant_5.tbl_1(a int, b text);
+CREATE TABLE tenant_5.partitioned_table(a int, b text) PARTITION BY RANGE (a);
+-- verify that we don't allow creating a partition table that is a child of a partitioned table in a different tenant schema
+CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF tenant_5.partitioned_table FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+-- verify that we don't allow creating a local partition table that is a child of a tenant partitioned table
+CREATE TABLE regular_schema.local_child_table PARTITION OF tenant_5.partitioned_table FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+SET citus.use_citus_managed_tables TO ON;
+CREATE TABLE regular_schema.local_child_table PARTITION OF tenant_5.partitioned_table FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+RESET citus.use_citus_managed_tables;
+CREATE TABLE regular_schema.local_partitioned_table(a int, b text) PARTITION BY RANGE (a);
+-- verify that we don't allow creating a partition table that is a child of a non-tenant partitioned table
+CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF regular_schema.local_partitioned_table FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF regular_schema.citus_local_partitioned_table FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF regular_schema.dist_partitioned_table FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+CREATE TABLE tenant_4.parent_attach_test(a int, b text) PARTITION BY RANGE (a);
+CREATE TABLE tenant_4.child_attach_test(a int, b text);
+CREATE TABLE tenant_5.parent_attach_test(a int, b text) PARTITION BY RANGE (a);
+CREATE TABLE tenant_5.child_attach_test(a int, b text);
+CREATE TABLE regular_schema.parent_attach_test_local(a int, b text) PARTITION BY RANGE (a);
+CREATE TABLE regular_schema.child_attach_test_local(a int, b text);
+-- verify that we don't allow attaching a tenant table into a tenant partitioned table, if they are not in the same schema
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION tenant_5.child_attach_test FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+-- verify that we don't allow attaching a non-tenant table into a tenant partitioned table
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION regular_schema.child_attach_test_local FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION regular_schema.child_attach_test_citus_local FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION regular_schema.child_attach_test_dist FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+-- verify that we don't allow attaching a tenant table into a non-tenant partitioned table
+ALTER TABLE regular_schema.parent_attach_test_local ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+ALTER TABLE regular_schema.parent_attach_test_citus_local ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+ALTER TABLE regular_schema.parent_attach_test_dist ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
+ERROR: partitioning within a distributed schema is not supported when the parent and the child are in different schemas
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
+-- verify that we don't allow multi-level partitioning on tenant tables
+CREATE TABLE tenant_4.multi_level_test(a int, b text) PARTITION BY RANGE (a);
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION tenant_4.multi_level_test FOR VALUES FROM (1) TO (2);
+ERROR: Citus doesn't support multi-level partitioned tables
+DETAIL: Relation "multi_level_test" is partitioned table itself and it is also partition of relation "parent_attach_test".
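+-- (Editorial aside, not part of the recorded test output: the multi-level
+-- restriction rejected just above can be spotted manually by listing
+-- relations that are both partitioned ('p' in pg_class.relkind) and
+-- themselves a partition (an inhrelid in pg_inherits), e.g.:
+--
+--   SELECT c.oid::regclass AS multi_level_candidate
+--   FROM pg_class c
+--   JOIN pg_inherits i ON i.inhrelid = c.oid
+--   WHERE c.relkind = 'p';
+-- )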
+-- verify that we allow attaching a tenant table into a tenant partitioned table, if they are in the same schema
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition
+WHERE logicalrelid = 'tenant_4.parent_attach_test'::regclass AND
+ partmethod = 'n' AND repmodel = 's' AND colocationid = (
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'tenant_4.child_attach_test'::regclass);
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT EXISTS(
+ SELECT 1
+ FROM pg_inherits
+ WHERE inhrelid = 'tenant_4.child_attach_test'::regclass AND
+ inhparent = 'tenant_4.parent_attach_test'::regclass
+) AS is_partition;
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- verify that we allow detaching a tenant partition from a tenant partitioned table
+ALTER TABLE tenant_4.parent_attach_test DETACH PARTITION tenant_4.child_attach_test;
+-- verify they're still sharing the same colocation group
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition
+WHERE logicalrelid = 'tenant_4.parent_attach_test'::regclass AND
+ partmethod = 'n' AND repmodel = 's' AND colocationid = (
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'tenant_4.child_attach_test'::regclass);
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- verify that they're no longer in parent-child relationship
+SELECT result FROM run_command_on_all_nodes($$
+SELECT NOT EXISTS(
+ SELECT 1
+ FROM pg_inherits
+ WHERE inhrelid = 'tenant_4.child_attach_test'::regclass AND
+ inhparent = 'tenant_4.parent_attach_test'::regclass
+) AS is_partition;
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- errors out because shard replication factor > 1
+SET citus.shard_replication_factor TO 2;
+CREATE TABLE tenant_4.tbl_3 AS SELECT 1 AS a, 'text' as b;
+ERROR: could not create single shard table: citus.shard_replication_factor is greater than 1
+HINT: Consider setting citus.shard_replication_factor to 1 and try again
+SET citus.shard_replication_factor TO 1;
+-- verify that we allow creating tenant tables by using CREATE TABLE AS / SELECT INTO commands
+CREATE TABLE tenant_4.tbl_3 AS SELECT 1 AS a, 'text' as b;
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$tenant_4.tbl_3$$)
+CREATE TEMP TABLE IF NOT EXISTS tenant_4.tbl_4 AS SELECT 1 as a, 'text' as b;
+ERROR: cannot create temporary relation in non-temporary schema
+CREATE UNLOGGED TABLE IF NOT EXISTS tenant_4.tbl_4 AS SELECT 1 as a, 'text' as b WITH NO DATA;
+-- the same command, no changes because of IF NOT EXISTS
+CREATE UNLOGGED TABLE IF NOT EXISTS tenant_4.tbl_4 AS SELECT 1 as a, 'text' as b WITH NO DATA;
+NOTICE: relation "tbl_4" already exists, skipping
+SELECT 1 as a, 'text' as b INTO tenant_4.tbl_5;
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$tenant_4.tbl_5$$)
+-- verify we can query the newly created tenant tables from any node
+SELECT result FROM run_command_on_all_nodes($$
+SELECT jsonb_agg(
+ jsonb_build_object(
+ 'a', a,
+ 'b', b
+ )
+ ORDER BY a
+ )
+ FROM tenant_4.tbl_3
+$$);
+ result
+---------------------------------------------------------------------
+ [{"a": 1, "b": "text"}]
+ [{"a": 1, "b": "text"}]
+ [{"a": 1, "b": "text"}]
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*) FROM tenant_4.tbl_5
+$$);
+ result
+---------------------------------------------------------------------
+ 1
+ 1
+ 1
+(3 rows)
+
+-- verify that we don't allow creating tenant tables by using CREATE TABLE OF commands
+CREATE TABLE tenant_4.employees OF regular_schema.employee_type (
+ PRIMARY KEY (name),
+ salary WITH OPTIONS DEFAULT 1000
+);
+ERROR: cannot create tables in a distributed schema using CREATE TABLE OF syntax
+-- verify that we act accordingly when IF NOT EXISTS is used
+CREATE TABLE IF NOT EXISTS tenant_4.tbl_6(a int, b text);
+CREATE TABLE IF NOT EXISTS tenant_4.tbl_6(a int, b text);
+NOTICE: relation "tbl_6" already exists, skipping
+SELECT result FROM run_command_on_all_nodes($$
+SELECT jsonb_agg(
+ jsonb_build_object(
+ 'logicalrelid', logicalrelid,
+ 'partmethod', partmethod
+ )
+ ORDER BY logicalrelid::text
+ )
+ FROM pg_dist_partition
+ WHERE logicalrelid::text LIKE 'tenant_4.tbl%'
+$$);
+ result
+---------------------------------------------------------------------
+ [{"partmethod": "n", "logicalrelid": "tenant_4.tbl_1"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_2"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_3"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_4"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_5"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_6"}]
+ [{"partmethod": "n", "logicalrelid": "tenant_4.tbl_1"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_2"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_3"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_4"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_5"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_6"}]
+ [{"partmethod": "n", "logicalrelid": "tenant_4.tbl_1"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_2"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_3"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_4"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_5"}, {"partmethod": "n", "logicalrelid": "tenant_4.tbl_6"}]
+(3 rows)
+
+CREATE TABLE regular_schema.local(a int, b text);
+-- verify that we can create a table LIKE another table
+CREATE TABLE tenant_5.test_table_like_1(LIKE tenant_5.tbl_1); -- using a table from the same schema
+CREATE TABLE tenant_5.test_table_like_2(LIKE tenant_4.tbl_1); -- using a table from another schema
+CREATE TABLE tenant_5.test_table_like_3(LIKE regular_schema.local); -- using a local table
+CREATE TABLE tenant_5.test_table_like_4(LIKE regular_schema.citus_local); -- using a citus local table
+CREATE TABLE tenant_5.test_table_like_5(LIKE regular_schema.dist); -- using a distributed table
+-- verify that all of them are converted to tenant tables
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*) = 5
+FROM pg_dist_partition
+WHERE logicalrelid::text LIKE 'tenant_5.test_table_like_%' AND
+ partmethod = 'n' AND repmodel = 's' AND colocationid = (
+ SELECT colocationid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_5'
+ );
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+CREATE TABLE regular_schema.local_table_using_like(LIKE tenant_5.tbl_1);
+-- verify that regular_schema.local_table_using_like is not a tenant table
+SELECT COUNT(*) = 0 FROM pg_dist_partition
+WHERE logicalrelid = 'regular_schema.local_table_using_like'::regclass;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- verify that INHERITS syntax is not supported when creating a tenant table
+CREATE TABLE tenant_5.test_table_inherits_1(x int) INHERITS (tenant_5.tbl_1); -- using a table from the same schema
+ERROR: tables in a distributed schema cannot inherit or be inherited
+CREATE TABLE tenant_5.test_table_inherits_2(x int) INHERITS (tenant_4.tbl_1); -- using a table from another schema
+ERROR: tables in a distributed schema cannot inherit or be inherited
+CREATE TABLE tenant_5.test_table_inherits_3(x int) INHERITS (regular_schema.local); -- using a local table
+ERROR: tables in a distributed schema cannot inherit or be inherited
+CREATE TABLE tenant_5.test_table_inherits_4(x int) INHERITS (regular_schema.citus_local); -- using a citus local table
+ERROR: tables in a distributed schema cannot inherit or be inherited
+CREATE TABLE tenant_5.test_table_inherits_5(x int) INHERITS (regular_schema.dist); -- using a distributed table
+ERROR: tables in a distributed schema cannot inherit or be inherited
+-- verify that INHERITS syntax is not supported when creating a local table based on a tenant table
+CREATE TABLE regular_schema.local_table_using_inherits(x int) INHERITS (tenant_5.tbl_1);
+ERROR: tables in a distributed schema cannot inherit or be inherited
+CREATE TABLE tenant_5.tbl_2(a int, b text);
+CREATE SCHEMA "CiTuS.TeeN_108";
+ALTER SCHEMA "CiTuS.TeeN_108" RENAME TO citus_teen_proper;
+SELECT schemaid AS citus_teen_schemaid FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'citus_teen_proper' \gset
+SELECT colocationid AS citus_teen_colocationid FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'citus_teen_proper' \gset
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT schemaid INTO citus_teen_schemaid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'citus_teen_proper'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT colocationid INTO citus_teen_colocationid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'citus_teen_proper'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+-- verify that colocation id is set for the tenant with a weird name too
+SELECT :citus_teen_colocationid > 0;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- verify that the same colocation id is used on other nodes too
+SELECT format(
+ 'SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=1 FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = ''citus_teen_proper'' AND
+ colocationid = %s;
+ $$);',
+:citus_teen_colocationid) AS verify_all_nodes_query \gset
+:verify_all_nodes_query
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+ALTER SCHEMA citus_teen_proper RENAME TO "CiTuS.TeeN_108";
+SET citus.enable_schema_based_sharding TO OFF;
+-- Show that the tables created in tenant schemas are considered to be
+-- tenant tables even if the GUC was set to off when creating the table.
+CREATE TABLE tenant_5.tbl_3(a int, b text);
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition WHERE logicalrelid = 'tenant_5.tbl_3'::regclass;
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SET citus.enable_schema_based_sharding TO ON;
+-- Verify that tables that belong to tenant_4 and tenant_5 are stored on
+-- different worker nodes due to the order we followed when creating the first
+-- tenant tables in each of them.
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(DISTINCT(nodename, nodeport))=2 FROM citus_shards
+WHERE table_name IN ('tenant_4.tbl_1'::regclass, 'tenant_5.tbl_1'::regclass);
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- show that all the tables in tenant_4 are colocated with each other.
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition
+WHERE logicalrelid::regclass::text LIKE 'tenant_4.%';
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- verify the same for tenant_5 too
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition
+WHERE logicalrelid::regclass::text LIKE 'tenant_5.%';
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT schemaid INTO tenant_4_schemaid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_4'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT colocationid INTO tenant_4_colocationid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_4'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SET client_min_messages TO WARNING;
+-- Rename it to a name that contains a single quote to verify that we properly
+-- escape its name when sending the command to delete the pg_dist_schema
+-- entry on workers.
+ALTER SCHEMA tenant_4 RENAME TO "tenant\'_4";
+DROP SCHEMA "tenant\'_4", "CiTuS.TeeN_108" CASCADE;
+SET client_min_messages TO NOTICE;
+-- Verify that dropping a tenant schema deletes the associated
+-- pg_dist_schema entry and pg_dist_colocation too.
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_schema
+ WHERE schemaid = (SELECT schemaid FROM tenant_4_schemaid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_schema
+ WHERE schemaid = (SELECT schemaid FROM citus_teen_schemaid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_schema
+ WHERE colocationid = (SELECT colocationid FROM tenant_4_colocationid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_schema
+ WHERE colocationid = (SELECT colocationid FROM citus_teen_colocationid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ DROP TABLE tenant_4_schemaid, citus_teen_schemaid, tenant_4_colocationid, citus_teen_colocationid
+$$);
+ result
+---------------------------------------------------------------------
+ DROP TABLE
+ DROP TABLE
+ DROP TABLE
+(3 rows)
+
+\c - - - :master_port
+SET client_min_messages TO NOTICE;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050400;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+-- show that we don't allow colocating a Citus table with a tenant table
+CREATE TABLE regular_schema.null_shard_key_1(a int, b text);
+SELECT create_distributed_table('regular_schema.null_shard_key_1', null, colocate_with => 'tenant_5.tbl_2');
+ERROR: cannot colocate tables tbl_2 and null_shard_key_1
+DETAIL: Cannot colocate tables with distributed schema tables by using colocate_with option.
+HINT: Consider using "CREATE TABLE" statement to create this table as a single-shard distributed table in the same schema to automatically colocate it with tenant_5.tbl_2
+SELECT create_distributed_table('regular_schema.null_shard_key_1', 'a', colocate_with => 'tenant_5.tbl_2');
+ERROR: cannot colocate tables tbl_2 and null_shard_key_1
+DETAIL: Distribution column types don't match for tbl_2 and null_shard_key_1.
+CREATE TABLE regular_schema.null_shard_key_table_2(a int, b text);
+SELECT create_distributed_table('regular_schema.null_shard_key_table_2', null);
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- let's switch to a different worker node for the rest of the tests
+\c - - - :worker_2_port
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050500;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+-- Show that we don't choose to colocate regular single-shard tables with
+-- tenant tables by default.
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE colocationid = (
+ SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'regular_schema.null_shard_key_table_2'::regclass
+);
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- save the colocation id used for tenant_5
+SELECT colocationid AS tenant_5_old_colocationid FROM pg_dist_schema
+WHERE schemaid::regnamespace::text = 'tenant_5' \gset
+-- drop all the tables that belong to tenant_5 and create a new one
+DROP TABLE tenant_5.tbl_1, tenant_5.tbl_2, tenant_5.tbl_3;
+CREATE TABLE tenant_5.tbl_4(a int, b text);
+-- verify that tenant_5 is still associated with the same colocation id
+SELECT format(
+ 'SELECT result FROM run_command_on_all_nodes($$
+ SELECT colocationid = %s FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = ''tenant_5'';
+ $$);',
+:tenant_5_old_colocationid) AS verify_all_nodes_query \gset
+:verify_all_nodes_query
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT schemaid INTO tenant_1_schemaid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_1'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT schemaid INTO tenant_2_schemaid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_2'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT colocationid INTO tenant_1_colocationid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_1'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT colocationid INTO tenant_2_colocationid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_2'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SET client_min_messages TO WARNING;
+SET citus.enable_schema_based_sharding TO OFF;
+DROP SCHEMA tenant_1 CASCADE;
+CREATE ROLE test_non_super_user;
+ALTER ROLE test_non_super_user NOSUPERUSER;
+ALTER SCHEMA tenant_2 OWNER TO non_existing_role;
+ERROR: role "non_existing_role" does not exist
+ALTER SCHEMA tenant_2 OWNER TO test_non_super_user;
+select result from run_command_on_all_nodes ($$
+ SELECT pg_get_userbyid(nspowner) AS schema_owner
+ FROM pg_namespace
+ WHERE nspname = 'tenant_2'
+$$);
+ result
+---------------------------------------------------------------------
+ test_non_super_user
+ test_non_super_user
+ test_non_super_user
+(3 rows)
+
+\c - - - :master_port
+SET client_min_messages TO WARNING;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050600;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+DROP OWNED BY test_non_super_user CASCADE;
+\c - - - :worker_2_port
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050700;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+DROP ROLE test_non_super_user;
+-- Verify that dropping a tenant schema always deletes
+-- the associated pg_dist_schema entry even if the schema was
+-- dropped while the GUC was set to off.
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_schema
+ WHERE schemaid IN (SELECT schemaid FROM tenant_1_schemaid UNION SELECT schemaid FROM tenant_2_schemaid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_schema
+ WHERE colocationid IN (SELECT colocationid FROM tenant_1_colocationid UNION SELECT colocationid FROM tenant_2_colocationid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ DROP TABLE tenant_1_schemaid, tenant_2_schemaid, tenant_1_colocationid, tenant_2_colocationid
+$$);
+ result
+---------------------------------------------------------------------
+ DROP TABLE
+ DROP TABLE
+ DROP TABLE
+(3 rows)
+
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+-- show that all schemaid values are unique and non-null in pg_dist_schema
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid IS NULL;
+SELECT (SELECT COUNT(*) FROM pg_dist_schema) =
+ (SELECT COUNT(DISTINCT(schemaid)) FROM pg_dist_schema);
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- show that all colocationid values are unique and non-null in pg_dist_schema
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE colocationid IS NULL;
+SELECT (SELECT COUNT(*) FROM pg_dist_schema) =
+ (SELECT COUNT(DISTINCT(colocationid)) FROM pg_dist_schema);
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+CREATE TABLE public.cannot_be_a_tenant_table(a int, b text);
+-- show that we don't consider public schema as a tenant schema
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'public';
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+DROP TABLE public.cannot_be_a_tenant_table;
+CREATE TEMPORARY TABLE temp_table(a int, b text);
+-- show that we don't consider temporary schemas as tenant schemas
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text = '%pg_temp%';
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+DROP TABLE temp_table;
+-- test creating a tenant schema and a tenant table for it in the same transaction
+BEGIN;
+ CREATE SCHEMA tenant_7;
+ CREATE TABLE tenant_7.tbl_1(a int, b text);
+ CREATE TABLE tenant_7.tbl_2(a int, b text);
+ SELECT colocationid = (
+ SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_7.tbl_1'::regclass
+ )
+ FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_7';
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+ -- make sure that both tables created in tenant_7 are colocated
+ SELECT COUNT(DISTINCT(colocationid)) = 1 FROM pg_dist_partition
+ WHERE logicalrelid IN ('tenant_7.tbl_1'::regclass, 'tenant_7.tbl_2'::regclass);
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+COMMIT;
+-- Test creating a tenant schema and a tenant table for it in the same transaction
+-- but this time rollback the transaction.
+BEGIN;
+ CREATE SCHEMA tenant_8;
+ CREATE TABLE tenant_8.tbl_1(a int, b text);
+ CREATE TABLE tenant_8.tbl_2(a int, b text);
+ROLLBACK;
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'tenant_8';
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_partition WHERE logicalrelid::text LIKE 'tenant_8.%';
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- Verify that the citus.enable_schema_based_sharding and citus.use_citus_managed_tables
+-- GUCs don't interfere with each other when creating a table in a tenant schema.
+--
+-- In the utility hook, we check whether the CREATE TABLE command is issued on a tenant
+-- schema before checking whether citus.use_citus_managed_tables is set to ON to
+-- avoid converting the table into a Citus managed table unnecessarily.
+--
+-- If the CREATE TABLE command is issued on a tenant schema, we skip the check
+-- for citus.use_citus_managed_tables.
+SET citus.use_citus_managed_tables TO ON;
+CREATE TABLE tenant_7.tbl_3(a int, b text, PRIMARY KEY(a));
+RESET citus.use_citus_managed_tables;
+-- Verify that we don't unnecessarily convert a table into a Citus managed
+-- table when creating it with a pre-defined foreign key to a reference table.
+-- Notice that tenant_7.tbl_4 has foreign keys both to tenant_7.tbl_3 and
+-- to reference_table.
+CREATE TABLE tenant_7.tbl_4(a int REFERENCES regular_schema.reference_table, FOREIGN KEY(a) REFERENCES tenant_7.tbl_3(a) ON DELETE CASCADE);
+INSERT INTO tenant_7.tbl_3 VALUES (1, 'a'), (2, 'b'), (3, 'c');
+INSERT INTO regular_schema.reference_table VALUES (1), (2), (3);
+INSERT INTO tenant_7.tbl_4 VALUES (1), (2), (3);
+DELETE FROM tenant_7.tbl_3 WHERE a < 3;
+SELECT * FROM tenant_7.tbl_4 ORDER BY a;
+ a
+---------------------------------------------------------------------
+ 3
+(1 row)
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=2 FROM pg_dist_partition
+WHERE logicalrelid IN ('tenant_7.tbl_3'::regclass, 'tenant_7.tbl_4'::regclass) AND
+ partmethod = 'n' AND repmodel = 's' AND
+ colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_7.tbl_1'::regclass);
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+CREATE TABLE local_table(a int PRIMARY KEY);
+-- fails because tenant tables cannot have foreign keys to local tables
+CREATE TABLE tenant_7.tbl_5(a int REFERENCES local_table(a));
+ERROR: referenced table "local_table" must be a distributed table or a reference table
+DETAIL: To enforce foreign keys, the referencing and referenced rows need to be stored on the same node.
+HINT: You could use SELECT create_reference_table('local_table') to replicate the referenced table to all nodes or consider dropping the foreign key
+-- Fails because tenant tables cannot have foreign keys to tenant tables
+-- that belong to different tenant schemas.
+CREATE TABLE tenant_5.tbl_5(a int, b text, FOREIGN KEY(a) REFERENCES tenant_7.tbl_3(a));
+ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
+DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
+CREATE SCHEMA tenant_9;
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT schemaid INTO tenant_9_schemaid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_9'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT colocationid INTO tenant_9_colocationid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_9'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+DROP SCHEMA tenant_9;
+-- Make sure that dropping an empty tenant schema
+-- doesn't leave any dangling entries in pg_dist_schema and
+-- pg_dist_colocation.
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_schema
+ WHERE schemaid = (SELECT schemaid FROM tenant_9_schemaid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_colocation
+ WHERE colocationid = (SELECT colocationid FROM tenant_9_colocationid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ DROP TABLE tenant_9_schemaid, tenant_9_colocationid
+$$);
+ result
+---------------------------------------------------------------------
+ DROP TABLE
+ DROP TABLE
+ DROP TABLE
+(3 rows)
+
+CREATE TABLE tenant_3.search_path_test(a int);
+INSERT INTO tenant_3.search_path_test VALUES (1), (10);
+CREATE TABLE tenant_5.search_path_test(a int);
+INSERT INTO tenant_5.search_path_test VALUES (2);
+CREATE TABLE tenant_7.search_path_test(a int);
+INSERT INTO tenant_7.search_path_test VALUES (3);
+SET search_path TO tenant_5;
+PREPARE list_tuples AS SELECT * FROM search_path_test ORDER BY a;
+SELECT * FROM search_path_test ORDER BY a;
+ a
+---------------------------------------------------------------------
+ 2
+(1 row)
+
+SET search_path TO tenant_3;
+DELETE FROM search_path_test WHERE a = 1;
+SELECT * FROM search_path_test ORDER BY a;
+ a
+---------------------------------------------------------------------
+ 10
+(1 row)
+
+SELECT regular_schema.increment_one();
+ increment_one
+---------------------------------------------------------------------
+
+(1 row)
+
+EXECUTE list_tuples;
+ a
+---------------------------------------------------------------------
+ 11
+(1 row)
+
+SET search_path TO tenant_7;
+DROP TABLE search_path_test;
+SELECT * FROM pg_dist_partition WHERE logicalrelid::text = 'search_path_test';
+ logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
+---------------------------------------------------------------------
+(0 rows)
+
+SET search_path TO tenant_5;
+SELECT regular_schema.decrement_one();
+ decrement_one
+---------------------------------------------------------------------
+
+(1 row)
+
+EXECUTE list_tuples;
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SET search_path TO regular_schema;
+CREATE USER test_other_super_user WITH superuser;
+\c - test_other_super_user
+SET citus.enable_schema_based_sharding TO ON;
+CREATE SCHEMA tenant_9;
+\c - postgres
+SET search_path TO regular_schema;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2060000;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+SET client_min_messages TO NOTICE;
+SET citus.enable_schema_based_sharding TO ON;
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT schemaid INTO tenant_9_schemaid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_9'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT colocationid INTO tenant_9_colocationid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_9'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+\c - - - :master_port
+SET client_min_messages TO WARNING;
+DROP OWNED BY test_other_super_user;
+\c - - - :worker_2_port
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2060100;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+SET client_min_messages TO NOTICE;
+SET citus.enable_schema_based_sharding TO ON;
+-- Make sure that dropping an empty tenant schema
+-- (via DROP OWNED BY) doesn't leave any dangling entries in
+-- pg_dist_schema and pg_dist_colocation.
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_schema
+ WHERE schemaid = (SELECT schemaid FROM tenant_9_schemaid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_colocation
+ WHERE colocationid = (SELECT colocationid FROM tenant_9_colocationid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ DROP TABLE tenant_9_schemaid, tenant_9_colocationid
+$$);
+ result
+---------------------------------------------------------------------
+ DROP TABLE
+ DROP TABLE
+ DROP TABLE
+(3 rows)
+
+DROP USER test_other_super_user;
+CREATE ROLE test_non_super_user WITH LOGIN;
+ALTER ROLE test_non_super_user NOSUPERUSER;
+\c - - - :master_port
+GRANT CREATE ON DATABASE regression TO test_non_super_user;
+GRANT CREATE ON SCHEMA public TO test_non_super_user ;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2070000;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+\c - test_non_super_user - :worker_2_port
+SET search_path TO regular_schema;
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+SET client_min_messages TO NOTICE;
+SET citus.enable_schema_based_sharding TO ON;
+-- test create / drop tenant schema / table
+CREATE SCHEMA tenant_10;
+CREATE TABLE tenant_10.tbl_1(a int, b text);
+CREATE TABLE tenant_10.tbl_2(a int, b text);
+DROP TABLE tenant_10.tbl_2;
+CREATE SCHEMA tenant_11;
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT schemaid INTO tenant_10_schemaid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_10'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT schemaid INTO tenant_11_schemaid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_11'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT colocationid INTO tenant_10_colocationid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_10'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT colocationid INTO tenant_11_colocationid FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'tenant_11'
+$$);
+ result
+---------------------------------------------------------------------
+ SELECT 1
+ SELECT 1
+ SELECT 1
+(3 rows)
+
+-- Verify metadata for tenant schemas that are created via non-super-user.
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(DISTINCT(schemaid))=2 FROM pg_dist_schema
+ WHERE schemaid IN (SELECT schemaid FROM tenant_10_schemaid UNION SELECT schemaid FROM tenant_11_schemaid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(DISTINCT(colocationid))=2 FROM pg_dist_schema
+ WHERE colocationid IN (SELECT colocationid FROM tenant_10_colocationid UNION SELECT colocationid FROM tenant_11_colocationid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA tenant_10, tenant_11 CASCADE;
+SET client_min_messages TO NOTICE;
+-- Verify that dropping a tenant schema via non-super-user
+-- deletes the associated pg_dist_schema entry.
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_schema
+ WHERE schemaid IN (SELECT schemaid FROM tenant_10_schemaid UNION SELECT schemaid FROM tenant_11_schemaid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=0 FROM pg_dist_colocation
+ WHERE colocationid IN (SELECT colocationid FROM tenant_10_colocationid UNION SELECT colocationid FROM tenant_11_colocationid)
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ DROP TABLE tenant_10_schemaid, tenant_11_schemaid, tenant_10_colocationid, tenant_11_colocationid
+$$);
+ result
+---------------------------------------------------------------------
+ DROP TABLE
+ DROP TABLE
+ DROP TABLE
+(3 rows)
+
+\c - postgres - :master_port
+REVOKE CREATE ON DATABASE regression FROM test_non_super_user;
+REVOKE CREATE ON SCHEMA public FROM test_non_super_user;
+DROP ROLE test_non_super_user;
+-- Enable the GUC on all nodes to make sure that the CREATE SCHEMA / TABLE
+-- commands that we send to workers don't recursively try creating a
+-- tenant schema / table.
+\c - - - :master_port
+ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port
+ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_2_port
+ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- Verify that citus_internal.unregister_tenant_schema_globally can be called
+-- from workers too, but it will fail for this case as we haven't dropped the
+-- schema yet.
+SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3');
+ERROR: schema is expected to be already dropped because this function is only expected to be called from Citus drop hook
+SET search_path TO regular_schema;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080000;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+SET client_min_messages TO NOTICE;
+CREATE TABLE tenant_3.tbl_1(a int, b text);
+SET citus.enable_schema_based_sharding TO ON;
+CREATE SCHEMA tenant_6;
+CREATE TABLE tenant_6.tbl_1(a int, b text);
+-- verify pg_dist_partition entries for tenant_3.tbl_1 and tenant_6.tbl_1
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=2 FROM pg_dist_partition
+WHERE logicalrelid IN ('tenant_3.tbl_1'::regclass, 'tenant_6.tbl_1'::regclass) AND
+ partmethod = 'n' AND repmodel = 's' AND colocationid > 0;
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+\c - - - :master_port
+ALTER SYSTEM RESET citus.enable_schema_based_sharding;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port
+ALTER SYSTEM RESET citus.enable_schema_based_sharding;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_2_port
+SET search_path TO regular_schema;
+SET citus.enable_schema_based_sharding TO ON;
+SET search_path TO regular_schema;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080200;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+SET client_min_messages TO NOTICE;
+CREATE SCHEMA type_sch;
+CREATE TABLE type_sch.tbl (a INT);
+SELECT result FROM run_command_on_all_nodes($$
+SELECT jsonb_agg(
+ jsonb_build_object(
+ 'table_name', table_name,
+ 'citus_table_type', citus_table_type
+ )
+ ORDER BY table_name::text
+ )
+FROM public.citus_tables WHERE table_name::text LIKE 'type_sch.tbl';
+$$);
+ result
+---------------------------------------------------------------------
+ [{"table_name": "type_sch.tbl", "citus_table_type": "schema"}]
+ [{"table_name": "type_sch.tbl", "citus_table_type": "schema"}]
+ [{"table_name": "type_sch.tbl", "citus_table_type": "schema"}]
+(3 rows)
+
+SELECT format(
+ 'SELECT result FROM run_command_on_all_nodes($$
+ SELECT jsonb_agg(
+ jsonb_build_object(
+ ''table_name'', table_name,
+ ''citus_table_type'', citus_table_type
+ )
+ ORDER BY table_name::text
+ )
+ FROM citus_shards WHERE table_name::text LIKE ''type_sch.tbl'' AND nodeport IN (%s, %s);
+ $$);',
+:worker_1_port, :worker_2_port) AS verify_all_nodes_query \gset
+:verify_all_nodes_query
+ result
+---------------------------------------------------------------------
+ [{"table_name": "type_sch.tbl", "citus_table_type": "schema"}]
+ [{"table_name": "type_sch.tbl", "citus_table_type": "schema"}]
+ [{"table_name": "type_sch.tbl", "citus_table_type": "schema"}]
+(3 rows)
+
+RESET citus.enable_schema_based_sharding;
+-- test citus_schemas
+SET citus.enable_schema_based_sharding TO ON;
+CREATE USER citus_schema_role SUPERUSER;
+SET ROLE citus_schema_role;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080400;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+CREATE SCHEMA citus_sch1;
+CREATE TABLE citus_sch1.tbl1(a INT);
+CREATE TABLE citus_sch1.tbl2(a INT);
+RESET ROLE;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080500;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+CREATE SCHEMA citus_sch2;
+CREATE TABLE citus_sch2.tbl1(a INT);
+SET citus.enable_schema_based_sharding TO OFF;
+INSERT INTO citus_sch1.tbl1 SELECT * FROM generate_series(1, 10000);
+INSERT INTO citus_sch1.tbl2 SELECT * FROM generate_series(1, 5000);
+INSERT INTO citus_sch2.tbl1 SELECT * FROM generate_series(1, 12000);
+SELECT result FROM run_command_on_all_nodes($$
+
+
+SELECT jsonb_agg(
+ jsonb_build_object(
+ 'schema_name', cs.schema_name,
+ 'correct_colocation_id', cs.colocation_id = ctc.colocation_id,
+ 'correct_size', cs.schema_size = ctc.calculated_size,
+ 'schema_owner', cs.schema_owner
+ )
+ ORDER BY schema_name::text
+ )
+FROM public.citus_schemas cs
+JOIN
+(
+ SELECT
+ c.relnamespace, ct.colocation_id,
+ pg_size_pretty(sum(citus_total_relation_size(ct.table_name))) AS calculated_size
+ FROM public.citus_tables ct, pg_class c
+ WHERE ct.table_name::oid = c.oid
+ GROUP BY 1, 2
+) ctc ON cs.schema_name = ctc.relnamespace
+WHERE cs.schema_name::text LIKE 'citus\_sch_'
+$$);
+ result
+---------------------------------------------------------------------
+ [{"schema_name": "citus_sch1", "correct_size": true, "schema_owner": "citus_schema_role", "correct_colocation_id": true}, {"schema_name": "citus_sch2", "correct_size": true, "schema_owner": "postgres", "correct_colocation_id": true}]
+ [{"schema_name": "citus_sch1", "correct_size": true, "schema_owner": "citus_schema_role", "correct_colocation_id": true}, {"schema_name": "citus_sch2", "correct_size": true, "schema_owner": "postgres", "correct_colocation_id": true}]
+ [{"schema_name": "citus_sch1", "correct_size": true, "schema_owner": "citus_schema_role", "correct_colocation_id": true}, {"schema_name": "citus_sch2", "correct_size": true, "schema_owner": "postgres", "correct_colocation_id": true}]
+(3 rows)
+
+-- test empty schema and empty tables
+SET citus.enable_schema_based_sharding TO ON;
+CREATE SCHEMA citus_empty_sch1;
+CREATE SCHEMA citus_empty_sch2;
+CREATE TABLE citus_empty_sch2.tbl1(a INT);
+SET citus.enable_schema_based_sharding TO OFF;
+SELECT result FROM run_command_on_all_nodes($$
+SELECT jsonb_agg(
+ jsonb_build_object(
+ 'schema_name', schema_name,
+ 'schema_size', schema_size
+ )
+ ORDER BY schema_name::text
+ )
+FROM public.citus_schemas
+WHERE schema_name::text LIKE 'citus\_empty\_sch_';
+$$);
+ result
+---------------------------------------------------------------------
+ [{"schema_name": "citus_empty_sch1", "schema_size": "0 bytes"}, {"schema_name": "citus_empty_sch2", "schema_size": "0 bytes"}]
+ [{"schema_name": "citus_empty_sch1", "schema_size": "0 bytes"}, {"schema_name": "citus_empty_sch2", "schema_size": "0 bytes"}]
+ [{"schema_name": "citus_empty_sch1", "schema_size": "0 bytes"}, {"schema_name": "citus_empty_sch2", "schema_size": "0 bytes"}]
+(3 rows)
+
+-- test with non-privileged role
+CREATE USER citus_schema_nonpri;
+SET ROLE citus_schema_nonpri;
+SET client_min_messages TO ERROR;
+SELECT result FROM run_command_on_all_nodes($$
+SELECT jsonb_agg(
+ jsonb_build_object(
+ 'schema_name', schema_name,
+ 'colocation_id_visible', colocation_id > 0,
+ 'schema_size_visible', schema_size IS NOT NULL,
+ 'schema_owner', schema_owner
+ )
+ ORDER BY schema_name::text
+ )
+FROM public.citus_schemas WHERE schema_name::text LIKE 'citus\_sch_';
+$$);
+ result
+---------------------------------------------------------------------
+ [{"schema_name": "citus_sch1", "schema_owner": "citus_schema_role", "schema_size_visible": false, "colocation_id_visible": true}, {"schema_name": "citus_sch2", "schema_owner": "postgres", "schema_size_visible": false, "colocation_id_visible": true}]
+ [{"schema_name": "citus_sch1", "schema_owner": "citus_schema_role", "schema_size_visible": false, "colocation_id_visible": true}, {"schema_name": "citus_sch2", "schema_owner": "postgres", "schema_size_visible": false, "colocation_id_visible": true}]
+ [{"schema_name": "citus_sch1", "schema_owner": "citus_schema_role", "schema_size_visible": false, "colocation_id_visible": true}, {"schema_name": "citus_sch2", "schema_owner": "postgres", "schema_size_visible": false, "colocation_id_visible": true}]
+(3 rows)
+
+RESET client_min_messages;
+RESET ROLE;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080600;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+-- test that we handle CREATE SCHEMA with AUTHORIZATION properly for a distributed schema
+SET citus.enable_schema_based_sharding TO ON;
+CREATE ROLE authschema;
+CREATE SCHEMA AUTHORIZATION authschema;
+SET citus.enable_schema_based_sharding TO OFF;
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT COUNT(*)=1
+ FROM pg_dist_schema
+ WHERE schemaid::regnamespace::text = 'authschema';
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+-- a materialized view can be created under a tenant schema
+SET citus.enable_schema_based_sharding TO ON;
+SET citus.shard_replication_factor TO 1;
+CREATE SCHEMA sc1;
+CREATE TABLE sc1.t1 (a int);
+CREATE MATERIALIZED VIEW sc1.v1 AS SELECT * FROM sc1.t1;
+SET citus.enable_schema_based_sharding TO OFF;
+SELECT result FROM run_command_on_all_nodes($$
+SELECT colocationid > 0 FROM pg_dist_schema
+WHERE schemaid::regnamespace::text = 'sc1';
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SET client_min_messages TO WARNING;
+DROP TABLE public.local_table;
+-- On all nodes, save metadata records related to regular_schema and regular_schema_1
+-- for later verification of cleanup after dropping these propagated schemas from
+-- workers.
+SELECT result FROM run_command_on_all_nodes($$ + SELECT logicalrelid INTO expect_pg_dist_partition_cleanup + FROM pg_dist_partition + JOIN pg_class ON logicalrelid = pg_class.oid + JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid + WHERE pg_namespace.nspname IN ('regular_schema', 'regular_schema_1'); +$$); + result +--------------------------------------------------------------------- + SELECT 14 + SELECT 14 + SELECT 14 +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$ + SELECT shardid INTO expect_pg_dist_shard_cleanup + FROM pg_dist_shard + JOIN pg_dist_partition ON pg_dist_shard.logicalrelid = pg_dist_partition.logicalrelid + JOIN pg_class ON pg_dist_partition.logicalrelid = pg_class.oid + JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid + WHERE pg_namespace.nspname IN ('regular_schema', 'regular_schema_1'); +$$); + result +--------------------------------------------------------------------- + SELECT 172 + SELECT 172 + SELECT 172 +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$ + SELECT placementid INTO expect_pg_dist_placement_cleanup + FROM pg_dist_placement + JOIN pg_dist_shard ON pg_dist_placement.shardid = pg_dist_shard.shardid + JOIN pg_dist_partition ON pg_dist_shard.logicalrelid = pg_dist_partition.logicalrelid + JOIN pg_class ON pg_dist_partition.logicalrelid = pg_class.oid + JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid + WHERE pg_namespace.nspname IN ('regular_schema', 'regular_schema_1'); +$$); + result +--------------------------------------------------------------------- + SELECT 178 + SELECT 178 + SELECT 178 +(3 rows) + +CREATE SCHEMA local_schema; +-- show that we allow dropping distributed schemas from workers together with +-- regular propagated schemas +DROP SCHEMA tenant_5, regular_schema, tenant_3, local_schema CASCADE; +-- cannot drop non-schema-distributed tables together with schema-distributed tables from workers +DROP TABLE tenant_7.tbl_1, regular_schema_1.dist_table; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. 
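+-- Illustrative sketch, not executed as part of this test run: per the HINT
+-- above, the same combined drop is expected to succeed when issued from the
+-- coordinator, e.g.:
+--   \c - - - :master_port
+--   DROP TABLE tenant_7.tbl_1, regular_schema_1.dist_table;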
+-- can drop tables from multiple distributed schemas together
+DROP TABLE IF EXISTS tenant_7.tbl_1, tenant_6.tbl_1, tenant_7.tbl_2, tenant_7.tbl_3, tenant_7.tbl_4, does_not_exists;
+-- can drop multiple distributed schemas together
+DROP SCHEMA tenant_7, tenant_6, type_sch, citus_sch1, citus_sch2, citus_empty_sch1, citus_empty_sch2, authschema, sc1 CASCADE;
+-- can drop a regular propagated schema from worker too
+DROP SCHEMA regular_schema_1 CASCADE;
+DROP ROLE citus_schema_role, citus_schema_nonpri, authschema;
+-- verify that metadata related to regular_schema and regular_schema_1
+-- is cleaned up properly on all nodes
+SELECT result FROM run_command_on_all_nodes($$
+ SELECT 0 = (
+ SELECT COUNT(*) FROM pg_dist_partition
+ JOIN expect_pg_dist_partition_cleanup
+ ON pg_dist_partition.logicalrelid = expect_pg_dist_partition_cleanup.logicalrelid
+ ) + (
+ SELECT COUNT(*) FROM pg_dist_shard
+ JOIN expect_pg_dist_shard_cleanup
+ ON pg_dist_shard.shardid = expect_pg_dist_shard_cleanup.shardid
+ ) + (
+ SELECT COUNT(*) FROM pg_dist_placement
+ JOIN expect_pg_dist_placement_cleanup
+ ON pg_dist_placement.placementid = expect_pg_dist_placement_cleanup.placementid
+ );
+$$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes($$
+ DROP TABLE expect_pg_dist_partition_cleanup,
+ expect_pg_dist_shard_cleanup,
+ expect_pg_dist_placement_cleanup;
+$$);
+ result
+---------------------------------------------------------------------
+ DROP TABLE
+ DROP TABLE
+ DROP TABLE
+(3 rows)
+
+\c - - - :master_port
+SET client_min_messages TO WARNING;
+SELECT citus_remove_node('localhost', :master_port);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+-- reset pg_dist_shardid_seq on the coordinator
+DO $proc$
+DECLARE
+ v_last_value bigint;
+BEGIN
+ SELECT last_value INTO v_last_value FROM pg_dist_shardid_seq_prev_state;
+ EXECUTE format('ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH %s', v_last_value);
+END$proc$;
+DROP TABLE pg_dist_shardid_seq_prev_state;
diff --git a/src/test/regress/expected/schema_based_sharding_from_workers_b.out b/src/test/regress/expected/schema_based_sharding_from_workers_b.out
new file mode 100644
index 00000000000..8cb94a8d0a4
--- /dev/null
+++ b/src/test/regress/expected/schema_based_sharding_from_workers_b.out
@@ -0,0 +1,2194 @@
+SET client_min_messages TO WARNING;
+SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+-- Remove the workers and add them back with the groupids that we would assign at this
+-- point in multi_1_schedule, so that when we run this test file individually, inserting
+-- into distributed tables from workers using sequences still produces the same sequence
+-- values.
+SELECT 1 FROM citus_remove_node('localhost', :worker_1_port);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT 1 FROM citus_add_node('localhost', :worker_1_port, groupid => 33);
+ ?column?
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT 1 FROM citus_add_node('localhost', :worker_2_port, groupid => 47); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SET citus.next_shard_id TO 2090000; +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +SET client_min_messages TO WARNING; +CREATE SCHEMA regular_schema; +CREATE FUNCTION create_citus_local_with_data(table_name text) +RETURNS void +LANGUAGE plpgsql +AS $func$ +BEGIN + EXECUTE format(' + CREATE TABLE regular_schema.%I ( + col_1 text, + col_2 int, + col_3 bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 1000 INCREMENT BY 1000), + col_4 timestamp, + col_5 int, + col_6 bigint GENERATED ALWAYS as (col_5 * 7) stored, + col_7 numeric, + col_8 text GENERATED ALWAYS as (col_1 || ''_dummy'') stored, + col_9 bigint GENERATED ALWAYS AS IDENTITY (START WITH 100 INCREMENT BY 100) + );', table_name); + + EXECUTE format(' + INSERT INTO regular_schema.%I (col_1, col_2, col_4, col_5, col_7) + SELECT + i::text, -- col_1 + i + 42, -- col_2 + ''2026-01-01 00:00:00''::timestamp + (i || '' seconds'')::interval, -- col_4 + i * 3, -- col_5 + (i * 1.5)::numeric -- col_7 + FROM generate_series(1, 1000) AS i;', table_name); + + EXECUTE format(' + INSERT INTO regular_schema.%I (col_1, col_2, col_3, col_4, col_5, col_7) + OVERRIDING SYSTEM VALUE + SELECT + i::text, -- col_1 + i + 42, -- col_2 + 1000 + i, -- col_3 + ''2026-01-01 00:00:00''::timestamp + (i || '' seconds'')::interval, -- col_4 + i * 3, -- col_5 + (i * 1.5)::numeric -- col_7 + FROM generate_series(1001, 2000) AS i;', table_name); + + EXECUTE format(' + SELECT citus_add_local_table_to_metadata(''regular_schema.%I'');', table_name); + + EXECUTE format(' + ALTER TABLE regular_schema.%I DROP COLUMN col_2;', table_name); + + EXECUTE format(' + ALTER TABLE regular_schema.%I DROP COLUMN col_7;', table_name); + + EXECUTE format(' + ALTER TABLE regular_schema.%I ADD COLUMN col_10 bigint DEFAULT -197;', table_name); +END; +$func$; +SELECT create_citus_local_with_data('citus_local_1'); + create_citus_local_with_data +--------------------------------------------------------------------- + +(1 row) + +SELECT create_citus_local_with_data('citus_local_2'); + create_citus_local_with_data +--------------------------------------------------------------------- + +(1 row) + +SELECT create_citus_local_with_data('citus_local_3'); + create_citus_local_with_data +--------------------------------------------------------------------- + +(1 row) + +SELECT create_citus_local_with_data('citus_local_4'); + create_citus_local_with_data +--------------------------------------------------------------------- + +(1 row) + +SELECT create_citus_local_with_data('citus_local_5'); + create_citus_local_with_data +--------------------------------------------------------------------- + +(1 row) + +SELECT create_citus_local_with_data('citus_local_6'); + create_citus_local_with_data +--------------------------------------------------------------------- + +(1 row) + +SELECT * INTO regular_schema.old_data_coordinator FROM regular_schema.citus_local_4; +SET citus.enable_schema_based_sharding TO ON; +CREATE SCHEMA tenant_4; +CREATE SCHEMA tenant_5; +CREATE SCHEMA tenant_6; +SET citus.enable_schema_based_sharding TO OFF; +-- Verify data consistency after moving to a distributed schema. 
+-- +-- Repeat this for three different schemas to test copying data +-- i) to shards on different workers and ii) to a shard on the +-- coordinator, i.e., this node. +-- +-- First, test this within a transaction block (and rollback) and then +-- outside of a transaction block. +BEGIN; + ALTER TABLE regular_schema.citus_local_4 SET SCHEMA tenant_4; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_4.citus_local_4) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_4.citus_local_4) + ); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +ROLLBACK; +ALTER TABLE regular_schema.citus_local_4 SET SCHEMA tenant_4; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_4.citus_local_4) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_4.citus_local_4) +); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +BEGIN; + ALTER TABLE regular_schema.citus_local_5 SET SCHEMA tenant_5; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_5.citus_local_5) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_5.citus_local_5) + ); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +ROLLBACK; +ALTER TABLE regular_schema.citus_local_5 SET SCHEMA tenant_5; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_5.citus_local_5) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_5.citus_local_5) +); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +BEGIN; + ALTER TABLE regular_schema.citus_local_6 SET SCHEMA tenant_6; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_6.citus_local_6) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_6.citus_local_6) + ); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +ROLLBACK; +ALTER TABLE regular_schema.citus_local_6 SET SCHEMA tenant_6; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_6.citus_local_6) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_6.citus_local_6) +); + ?column? 
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE TABLE regular_schema.reference_table (id bigint PRIMARY KEY);
+SELECT create_reference_table('regular_schema.reference_table');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE TABLE regular_schema.distributed_table (id int, text_col text);
+SELECT create_distributed_table('regular_schema.distributed_table', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO regular_schema.distributed_table SELECT i, 'text_' || i FROM generate_series(1, 1000) AS i;
+CREATE OR REPLACE FUNCTION get_sequence_info(seq regclass)
+RETURNS TABLE (
+ type_name text,
+ min_value bigint,
+ max_value bigint,
+ start_value bigint,
+ last_value bigint
+)
+AS $func$
+DECLARE
+ v_last_value bigint;
+BEGIN
+ EXECUTE format('SELECT last_value FROM %s', seq::regclass) INTO v_last_value;
+
+ RETURN QUERY
+ SELECT seqtypid::regtype::text, seqmin, seqmax, seqstart, v_last_value
+ FROM pg_sequence
+ WHERE seqrelid = seq;
+END;
+$func$ LANGUAGE plpgsql;
+-- When creating a tenant table from workers, we always fetch the next shard id
+-- and placement id from the coordinator because we never sync those sequences to
+-- workers. For this reason, throughout this test file, we always set the next shard id
+-- on the coordinator when needed, rather than setting it on the current worker node.
+--
+-- Note that setting citus.next_shard_id on the coordinator would not work if the
+-- citus internal connection we use to execute master_get_new_shardid() on the
+-- coordinator changes because the underlying function, GetNextShardIdInternal(),
+-- just increments NextShardId for the current session. For this reason, we instead
+-- set pg_dist_shardid_seq on the coordinator in the tests where we test creating
+-- distributed tables from a worker and where we want to use consistent shard ids.
+--
+-- At the end of the test file, we reset pg_dist_shardid_seq.
+SELECT last_value::bigint INTO pg_dist_shardid_seq_prev_state FROM pg_catalog.pg_dist_shardid_seq;
+\c - - - :worker_1_port
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2091000;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+SET client_min_messages TO WARNING;
+SELECT * INTO regular_schema.old_data_worker FROM regular_schema.citus_local_1;
+SET citus.enable_schema_based_sharding TO ON;
+CREATE SCHEMA tenant_1;
+CREATE SCHEMA tenant_2;
+CREATE SCHEMA tenant_3;
+-- Verify data consistency after moving to a distributed schema.
+--
+-- Repeat this for three different schemas to test copying data
+-- i) to a shard on this worker, ii) to a shard on another worker, and
+-- iii) to a shard on the coordinator.
+--
+-- First, test this within a transaction block (and rollback) and then
+-- outside of a transaction block.
+BEGIN;
+ -- lock the table early in the transaction to make sure we don't break in that case
+ LOCK TABLE regular_schema.citus_local_1 IN ACCESS EXCLUSIVE MODE;
+ ALTER TABLE regular_schema.citus_local_1 SET SCHEMA tenant_1;
+ SELECT COUNT(*) = 0 FROM (
+ (TABLE tenant_1.citus_local_1) EXCEPT (TABLE regular_schema.old_data_worker)
+ UNION
+ (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_1.citus_local_1)
+ );
+ ?column?
+--------------------------------------------------------------------- + t +(1 row) + +ROLLBACK; +ALTER TABLE regular_schema.citus_local_1 SET SCHEMA tenant_1; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_1.citus_local_1) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_1.citus_local_1) +); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +BEGIN; + ALTER TABLE regular_schema.citus_local_2 SET SCHEMA tenant_2; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_2.citus_local_2) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_2.citus_local_2) + ); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +ROLLBACK; +ALTER TABLE regular_schema.citus_local_2 SET SCHEMA tenant_2; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_2.citus_local_2) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_2.citus_local_2) +); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +BEGIN; + ALTER TABLE regular_schema.citus_local_3 SET SCHEMA tenant_3; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_3.citus_local_3) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_3.citus_local_3) + ); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +ROLLBACK; +ALTER TABLE regular_schema.citus_local_3 SET SCHEMA tenant_3; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_3.citus_local_3) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_3.citus_local_3) +); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +CREATE TABLE regular_schema.local_table_1 ( + col_1 int, + col_2 text, + col_3 text GENERATED ALWAYS AS (col_1::text || '_gen') stored, + col_4 bigint DEFAULT 42, + col_5 bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 1 INCREMENT BY 1), + col_6 int GENERATED ALWAYS as (col_1 * 2) stored +); +INSERT INTO regular_schema.local_table_1 (col_1, col_2, col_4, col_5) +OVERRIDING SYSTEM VALUE +SELECT + i, -- col_1 + 'text_' || i, -- col_2 + i * 10, -- col_4 + 100 + i -- col_5 +FROM generate_series(1, 1000) AS i; +ALTER TABLE regular_schema.local_table_1 DROP COLUMN col_2; +ALTER TABLE regular_schema.local_table_1 DROP COLUMN col_5; +SELECT * INTO regular_schema.old_local_table_1 FROM regular_schema.local_table_1; +CREATE SCHEMA tenant_7; +-- test the same using a local table on this worker node +BEGIN; + ALTER TABLE regular_schema.local_table_1 SET SCHEMA tenant_7; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_7.local_table_1) EXCEPT (TABLE regular_schema.old_local_table_1) + UNION + (TABLE regular_schema.old_local_table_1 EXCEPT TABLE tenant_7.local_table_1) + ); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +ROLLBACK; +ALTER TABLE regular_schema.local_table_1 SET SCHEMA tenant_7; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_7.local_table_1) EXCEPT (TABLE regular_schema.old_local_table_1) + UNION + (TABLE regular_schema.old_local_table_1 EXCEPT TABLE tenant_7.local_table_1) +); + ?column? 
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_catalog.pg_table_size('tenant_7.local_table_1'::regclass) > 8192 as shell_table_has_data;
+ shell_table_has_data
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT COUNT(*)=1000 FROM tenant_7.local_table_1;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT truncate_local_data_after_distributing_table('tenant_7.local_table_1');
+ truncate_local_data_after_distributing_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT pg_catalog.pg_table_size('tenant_7.local_table_1'::regclass) = 8192 as shell_table_doesnt_have_data;
+ shell_table_doesnt_have_data
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT COUNT(*)=1000 FROM tenant_7.local_table_1;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE SCHEMA tenant_8;
+CREATE SEQUENCE dist_seq;
+CREATE TABLE tenant_8.table_1(a bigint DEFAULT nextval('dist_seq') UNIQUE, "b" text, c bigint GENERATED BY DEFAULT AS IDENTITY);
+INSERT INTO tenant_8.table_1("b") VALUES ('test');
+BEGIN;
+ -- add column
+ ALTER TABLE tenant_8.table_1 ADD COLUMN d bigint DEFAULT 2;
+ SELECT * FROM tenant_8.table_1 ORDER BY c;
+ a | b | c | d
+---------------------------------------------------------------------
+ 9288674231451649 | test | 9288674231451649 | 2
+(1 row)
+
+ -- alter default, set to 3
+ ALTER TABLE tenant_8.table_1 ALTER COLUMN d SET DEFAULT 3;
+ INSERT INTO tenant_8.table_1("b") VALUES ('test');
+ SELECT * FROM tenant_8.table_1 ORDER BY c;
+ a | b | c | d
+---------------------------------------------------------------------
+ 9288674231451649 | test | 9288674231451649 | 2
+ 9288674231451650 | test | 9288674231451650 | 3
+(2 rows)
+
+ -- drop default, see null
+ ALTER TABLE tenant_8.table_1 ALTER COLUMN d DROP DEFAULT;
+ INSERT INTO tenant_8.table_1("b") VALUES ('test');
+ SELECT * FROM tenant_8.table_1 ORDER BY c;
+ a | b | c | d
+---------------------------------------------------------------------
+ 9288674231451649 | test | 9288674231451649 | 2
+ 9288674231451650 | test | 9288674231451650 | 3
+ 9288674231451651 | test | 9288674231451651 |
+(3 rows)
+
+ -- clean up the rows that were added to test the default behavior
+ DELETE FROM tenant_8.table_1 WHERE "b" = 'test' AND a > 9288674231451649;
+COMMIT;
+-- alter column type
+ALTER TABLE tenant_8.table_1 ALTER COLUMN d TYPE text;
+UPDATE tenant_8.table_1 SET d = 'this is a text' WHERE d = '2';
+SELECT * FROM tenant_8.table_1 ORDER BY c;
+ a | b | c | d
+---------------------------------------------------------------------
+ 9288674231451649 | test | 9288674231451649 | this is a text
+(1 row)
+
+-- drop seq column
+ALTER TABLE tenant_8.table_1 DROP COLUMN a;
+SELECT * FROM tenant_8.table_1 ORDER BY c;
+ b | c | d
+---------------------------------------------------------------------
+ test | 9288674231451649 | this is a text
+(1 row)
+
+-- add not null constraint
+ALTER TABLE tenant_8.table_1 ALTER COLUMN b SET NOT NULL;
+-- we want to hide the error message context because the node reporting the
+-- constraint violation might change from one run to another.
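+-- (In psql, \set VERBOSITY terse limits server messages to the severity and
+-- primary text, omitting the DETAIL, HINT and CONTEXT fields; that is what
+-- hides the node-specific context here.)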
+\set VERBOSITY terse
+-- not null constraint violation, error out
+INSERT INTO tenant_8.table_1 VALUES (NULL, 2, 'test');
+ERROR: null value in column "b" violates not-null constraint
+\set VERBOSITY default
+-- drop not null constraint and try again
+ALTER TABLE tenant_8.table_1 ALTER COLUMN b DROP NOT NULL;
+INSERT INTO tenant_8.table_1 VALUES (NULL, 3, 'test');
+SELECT * FROM tenant_8.table_1 ORDER BY c;
+ b | c | d
+---------------------------------------------------------------------
+ | 3 | test
+ test | 9288674231451649 | this is a text
+(2 rows)
+
+-- add exclusion constraint
+ALTER TABLE tenant_8.table_1 ADD CONSTRAINT exc_b EXCLUDE USING btree (b with =);
+-- rename the exclusion constraint, errors out
+ALTER TABLE tenant_8.table_1 RENAME CONSTRAINT exc_b TO exc_b_1;
+ERROR: renaming constraints belonging to distributed tables is currently unsupported
+-- create exclusion constraint without a name
+ALTER TABLE tenant_8.table_1 ADD EXCLUDE USING btree (b with =);
+INSERT INTO tenant_8.table_1 VALUES (100, 150, 'test150');
+-- similarly, we want to hide the error message context here as well
+\set VERBOSITY terse
+-- should error out due to exclusion constraint violation
+INSERT INTO tenant_8.table_1 VALUES (100, 151, 'test151');
+ERROR: conflicting key value violates exclusion constraint "exc_b_2091002"
+\set VERBOSITY default
+-- test setting autovacuum option
+ALTER TABLE tenant_8.table_1 SET (autovacuum_enabled = false);
+BEGIN;
+ -- test multiple subcommands
+ ALTER TABLE tenant_8.table_1 ADD COLUMN int_column1 INTEGER, DROP COLUMN d, ADD COLUMN e bigint;
+ UPDATE tenant_8.table_1 SET e = c * 10;
+ -- test unique constraint without a name
+ ALTER TABLE tenant_8.table_1 ADD UNIQUE ("b");
+ -- test add / drop primary key
+ ALTER TABLE tenant_8.table_1 ADD PRIMARY KEY (c);
+ ALTER TABLE tenant_8.table_1 DROP CONSTRAINT table_1_pkey;
+ ALTER TABLE tenant_8.table_1 ADD PRIMARY KEY (e);
+ SELECT * FROM tenant_8.table_1 ORDER BY c;
+ b | c | int_column1 | e
+---------------------------------------------------------------------
+ | 3 | | 30
+ 100 | 150 | | 1500
+ test | 9288674231451649 | | 92886742314516490
+(3 rows)
+
+ -- test renaming table
+ ALTER TABLE tenant_8.table_1 RENAME TO table_2;
+ -- test renaming column
+ ALTER TABLE tenant_8.table_2 RENAME COLUMN e TO f;
+ -- test renaming an index
+ ALTER INDEX tenant_8.table_1_pkey RENAME TO table_1_pkey_renamed;
+COMMIT;
+-- make sure that the shell table definition is the same on all nodes
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_8.table_2') AS ddl_events;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+ result
+---------------------------------------------------------------------
+ CREATE TABLE tenant_8.table_2 (b text, c bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, int_column1 integer, f bigint NOT NULL) USING heap WITH (autovacuum_enabled='false'); ALTER TABLE tenant_8.table_2 OWNER TO postgres; ALTER TABLE tenant_8.table_2 ADD CONSTRAINT exc_b EXCLUDE USING btree (b WITH =); ALTER TABLE tenant_8.table_2 ADD CONSTRAINT table_1_b_excl EXCLUDE USING btree (b WITH =); ALTER TABLE tenant_8.table_2 ADD CONSTRAINT table_1_b_key UNIQUE (b); ALTER TABLE tenant_8.table_2 ADD CONSTRAINT table_1_pkey_renamed PRIMARY KEY (f)
+ CREATE TABLE tenant_8.table_2 (b text, c bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 9288674231451649 MAXVALUE 9570149208162305 START WITH 9288674231451649 CACHE 1 NO CYCLE) NOT NULL, int_column1 integer, f bigint NOT NULL) USING heap WITH (autovacuum_enabled='false'); ALTER TABLE tenant_8.table_2 OWNER TO postgres; ALTER TABLE tenant_8.table_2 ADD CONSTRAINT exc_b EXCLUDE USING btree (b WITH =); ALTER TABLE tenant_8.table_2 ADD CONSTRAINT table_1_b_excl EXCLUDE USING btree (b WITH =); ALTER TABLE tenant_8.table_2 ADD CONSTRAINT table_1_b_key UNIQUE (b); ALTER TABLE tenant_8.table_2 ADD CONSTRAINT table_1_pkey_renamed PRIMARY KEY (f)
+ CREATE TABLE tenant_8.table_2 (b text, c bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 13229323905400833 MAXVALUE 13510798882111489 START WITH 13229323905400833 CACHE 1 NO CYCLE) NOT NULL, int_column1 integer, f bigint NOT NULL) USING heap WITH (autovacuum_enabled='false'); ALTER TABLE tenant_8.table_2 OWNER TO postgres; ALTER TABLE tenant_8.table_2 ADD CONSTRAINT exc_b EXCLUDE USING btree (b WITH =); ALTER TABLE tenant_8.table_2 ADD CONSTRAINT table_1_b_excl EXCLUDE USING btree (b WITH =); ALTER TABLE tenant_8.table_2 ADD CONSTRAINT table_1_b_key UNIQUE (b); ALTER TABLE tenant_8.table_2 ADD CONSTRAINT table_1_pkey_renamed PRIMARY KEY (f)
+(3 rows)
+
+CREATE SCHEMA alter_table_add_column;
+\c - - - :master_port
+CREATE SCHEMA alter_table_add_column_other_schema;
+CREATE OR REPLACE FUNCTION alter_table_add_column_other_schema.my_random(numeric)
+ RETURNS numeric AS
+$$
+BEGIN
+ RETURN 7 * $1;
+END;
+$$
+LANGUAGE plpgsql IMMUTABLE;
+SET search_path TO alter_table_add_column;
+CREATE COLLATION caseinsensitive (
+ provider = icu,
+ locale = 'und-u-ks-level2'
+);
+CREATE TYPE "simple_!\'custom_type" AS (a integer, b integer);
+\c - - - :worker_1_port
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2092000;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_replication_factor TO 1;
+SET search_path TO alter_table_add_column;
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+CREATE TABLE referenced (int_col integer PRIMARY KEY);
+CREATE TABLE referencing (text_col text);
+-- test alter table add column with various subcommands and options
+ALTER TABLE referencing ADD COLUMN test_1 integer DEFAULT (alter_table_add_column_other_schema.my_random(7) + random() + 5) NOT NULL CONSTRAINT fkey REFERENCES referenced(int_col) ON UPDATE SET DEFAULT ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+ALTER TABLE referencing ADD COLUMN test_2 integer UNIQUE REFERENCES referenced(int_col) ON UPDATE CASCADE ON DELETE SET DEFAULT NOT DEFERRABLE INITIALLY IMMEDIATE;
+BEGIN;
+ ALTER TABLE referencing ADD COLUMN test_3 integer GENERATED ALWAYS AS (test_1 * alter_table_add_column_other_schema.my_random(1)) STORED UNIQUE REFERENCES referenced(int_col) MATCH FULL;
+ ALTER TABLE referencing ADD COLUMN test_4 integer PRIMARY KEY WITH (fillfactor=70) NOT NULL REFERENCES referenced(int_col) MATCH SIMPLE ON UPDATE CASCADE ON DELETE SET DEFAULT;
+ ALTER TABLE referencing ADD COLUMN test_5 integer CONSTRAINT unique_c UNIQUE WITH (fillfactor=50) NULL;
+COMMIT;
+ALTER TABLE referencing ADD COLUMN test_6 text COMPRESSION pglz COLLATE caseinsensitive NOT NULL;
+ALTER TABLE referencing ADD COLUMN "test_\'!7" "simple_!\'custom_type";
+-- we give up deparsing the ALTER TABLE command if it needs to create a check constraint, and fall back to the legacy behavior
+ALTER TABLE referencing ADD COLUMN test_8 integer CHECK (test_8 > 0);
+ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints
+DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names
+HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_8 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name CHECK (check_expression);
+ALTER TABLE referencing ADD COLUMN test_8 integer CONSTRAINT check_test_8 CHECK (test_8 > 0);
+-- error out properly even if the REFERENCES does not include the column list of the referenced table
+ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced;
+ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints
+DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names
+HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_10 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name FOREIGN KEY (test_10) REFERENCES referenced;
+ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced(int_col);
+ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints
+DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names
+HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_10 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name FOREIGN KEY (test_10) REFERENCES referenced (int_col );
+-- suppress notice messages because we want to ignore the notice about skipping adding test_6
+-- on the shard, if the shard is local
+SET client_min_messages TO WARNING;
+-- try to add test_6 again, but with IF NOT EXISTS
+ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 text;
+ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 integer;
+SET client_min_messages TO NOTICE;
+SELECT result FROM run_command_on_all_nodes(
+ $$SELECT get_grouped_fkey_constraints FROM get_grouped_fkey_constraints('alter_table_add_column.referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY result;
+ result
+---------------------------------------------------------------------
+ [{"deferred": true, "deferable": true, "on_delete": "c", "on_update": "d", "match_type": "s", "constraint_names": ["fkey", "fkey_xxxxxxx"], "referenced_tables": ["alter_table_add_column.referenced", "alter_table_add_column.referenced_2092000"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_2092001"], "referencing_columns": ["test_1"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey", "referencing__fkey_2092001"], "referenced_tables": ["alter_table_add_column.referenced", "alter_table_add_column.referenced_2092000"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_2092001"], "referencing_columns": ["test_2"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "a", "on_update": "a", "match_type": "f", "constraint_names": ["referencing__fkey1", "referencing__fkey1_2092001"], "referenced_tables":
["alter_table_add_column.referenced", "alter_table_add_column.referenced_2092000"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_2092001"], "referencing_columns": ["test_3"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey2", "referencing__fkey2_2092001"], "referenced_tables": ["alter_table_add_column.referenced", "alter_table_add_column.referenced_2092000"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_2092001"], "referencing_columns": ["test_4"], "referencing_columns_set_null_or_default": null}] + [{"deferred": true, "deferable": true, "on_delete": "c", "on_update": "d", "match_type": "s", "constraint_names": ["fkey"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_1"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_2"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "a", "on_update": "a", "match_type": "f", "constraint_names": ["referencing__fkey1"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_3"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey2"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_4"], "referencing_columns_set_null_or_default": null}] + [{"deferred": true, "deferable": true, "on_delete": "c", "on_update": "d", "match_type": "s", "constraint_names": ["fkey"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_1"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_2"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "a", "on_update": "a", "match_type": "f", "constraint_names": ["referencing__fkey1"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_3"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", 
"constraint_names": ["referencing__fkey2"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_4"], "referencing_columns_set_null_or_default": null}] +(3 rows) + +SELECT result FROM run_command_on_all_nodes( + $$SELECT get_index_defs FROM get_index_defs('alter_table_add_column', 'referencing')$$ +) +JOIN pg_dist_node USING (nodeid) +ORDER BY result; + result +--------------------------------------------------------------------- + [{"indexdefs": ["CREATE UNIQUE INDEX referencing__key ON alter_table_add_column.referencing USING btree (test_2)", "CREATE UNIQUE INDEX referencing__key_2092001 ON alter_table_add_column.referencing_2092001 USING btree (test_2)"], "indexnames": ["referencing__key", "referencing__key_2092001"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing__key1 ON alter_table_add_column.referencing USING btree (test_3)", "CREATE UNIQUE INDEX referencing__key1_2092001 ON alter_table_add_column.referencing_2092001 USING btree (test_3)"], "indexnames": ["referencing__key1", "referencing__key1_2092001"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing_pkey ON alter_table_add_column.referencing USING btree (test_4) WITH (fillfactor='70')", "CREATE UNIQUE INDEX referencing_pkey_2092001 ON alter_table_add_column.referencing_2092001 USING btree (test_4) WITH (fillfactor='70')"], "indexnames": ["referencing_pkey", "referencing_pkey_2092001"]}, {"indexdefs": ["CREATE UNIQUE INDEX unique_c ON alter_table_add_column.referencing USING btree (test_5) WITH (fillfactor='50')", "CREATE UNIQUE INDEX unique_c_2092001 ON alter_table_add_column.referencing_2092001 USING btree (test_5) WITH (fillfactor='50')"], "indexnames": ["unique_c", "unique_c_2092001"]}] + [{"indexdefs": ["CREATE UNIQUE INDEX referencing__key ON alter_table_add_column.referencing USING btree (test_2)"], "indexnames": ["referencing__key"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing__key1 ON alter_table_add_column.referencing USING btree (test_3)"], "indexnames": ["referencing__key1"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing_pkey ON alter_table_add_column.referencing USING btree (test_4) WITH (fillfactor='70')"], "indexnames": ["referencing_pkey"]}, {"indexdefs": ["CREATE UNIQUE INDEX unique_c ON alter_table_add_column.referencing USING btree (test_5) WITH (fillfactor='50')"], "indexnames": ["unique_c"]}] + [{"indexdefs": ["CREATE UNIQUE INDEX referencing__key ON alter_table_add_column.referencing USING btree (test_2)"], "indexnames": ["referencing__key"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing__key1 ON alter_table_add_column.referencing USING btree (test_3)"], "indexnames": ["referencing__key1"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing_pkey ON alter_table_add_column.referencing USING btree (test_4) WITH (fillfactor='70')"], "indexnames": ["referencing_pkey"]}, {"indexdefs": ["CREATE UNIQUE INDEX unique_c ON alter_table_add_column.referencing USING btree (test_5) WITH (fillfactor='50')"], "indexnames": ["unique_c"]}] +(3 rows) + +SELECT result FROM run_command_on_all_nodes( + $$SELECT get_column_defaults FROM get_column_defaults('alter_table_add_column', 'referencing')$$ +) +JOIN pg_dist_node USING (nodeid) +ORDER BY result; + result +--------------------------------------------------------------------- + [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double 
precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}] + [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}] + [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}] +(3 rows) + +SELECT result FROM run_command_on_all_nodes( + $$SELECT get_column_attrs FROM get_column_attrs('alter_table_add_column.referencing')$$ +) +JOIN pg_dist_node USING (nodeid) +ORDER BY result; + result +--------------------------------------------------------------------- + {"relnames": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_2092001"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": "default", "compression_method": ""}]} + {"relnames": ["alter_table_add_column.referencing"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": 
"default", "compression_method": ""}]} + {"relnames": ["alter_table_add_column.referencing"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": "default", "compression_method": ""}]} +(3 rows) + +CREATE TABLE tenant_8.table_3 (a int, b text); +INSERT INTO tenant_8.table_3 SELECT i, 'text_' || i FROM generate_series(1, 100) AS i; +-- test truncate +TRUNCATE tenant_8.table_3; +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=0 FROM tenant_8.table_3 +$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +BEGIN; + CREATE SCHEMA tenant_9; + CREATE SEQUENCE tenant_9.seq_1 START 5000 INCREMENT 5; + CREATE USER tenant_9_owner; + CREATE TABLE tenant_9.table_1 ( + a bigint NULL DEFAULT 100, + b text COLLATE "C" DEFAULT now()::text, + c int DEFAULT nextval('tenant_9.seq_1'::regclass), + d bigint GENERATED BY DEFAULT AS IDENTITY + ( + MINVALUE 5 + MAXVALUE 100 + START WITH 10 + ), + e int NOT NULL REFERENCES regular_schema.reference_table(id) MATCH FULL ON UPDATE RESTRICT ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + f int GENERATED ALWAYS AS (c * 2) STORED, + CONSTRAINT table_1_pkey PRIMARY KEY (a, b), + CONSTRAINT table_1_unique_b UNIQUE NULLS DISTINCT (b, a), + CONSTRAINT table_1_check_a_positive CHECK (a > 0) + ) + PARTITION BY RANGE (a); +COMMIT; +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_1') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + CREATE TABLE tenant_9.table_1 (a bigint DEFAULT 100 NOT NULL, b text DEFAULT (now())::text NOT NULL COLLATE "C", c integer DEFAULT nextval('tenant_9.seq_1'::regclass), d bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 5 MAXVALUE 100 START WITH 10 CACHE 1 NO CYCLE) NOT NULL, e integer NOT NULL, f integer GENERATED ALWAYS AS ((c * 2)) STORED, CONSTRAINT table_1_check_a_positive CHECK ((a > 0))) PARTITION BY RANGE (a) ; ALTER TABLE tenant_9.table_1 OWNER TO postgres; ALTER TABLE tenant_9.table_1 ADD CONSTRAINT table_1_pkey PRIMARY KEY (a, b); ALTER TABLE tenant_9.table_1 ADD CONSTRAINT table_1_unique_b UNIQUE (b, a) + CREATE TABLE tenant_9.table_1 (a bigint DEFAULT 100 NOT NULL, b text DEFAULT (now())::text NOT NULL COLLATE "C", c integer DEFAULT nextval('tenant_9.seq_1'::regclass), d bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 9288674231451649 MAXVALUE 
9570149208162305 START WITH 9288674231451649 CACHE 1 NO CYCLE) NOT NULL, e integer NOT NULL, f integer GENERATED ALWAYS AS ((c * 2)) STORED, CONSTRAINT table_1_check_a_positive CHECK ((a > 0))) PARTITION BY RANGE (a) ; ALTER TABLE tenant_9.table_1 OWNER TO postgres; ALTER TABLE tenant_9.table_1 ADD CONSTRAINT table_1_pkey PRIMARY KEY (a, b); ALTER TABLE tenant_9.table_1 ADD CONSTRAINT table_1_unique_b UNIQUE (b, a)
+ CREATE TABLE tenant_9.table_1 (a bigint DEFAULT 100 NOT NULL, b text DEFAULT (now())::text NOT NULL COLLATE "C", c integer DEFAULT nextval('tenant_9.seq_1'::regclass), d bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 13229323905400833 MAXVALUE 13510798882111489 START WITH 13229323905400833 CACHE 1 NO CYCLE) NOT NULL, e integer NOT NULL, f integer GENERATED ALWAYS AS ((c * 2)) STORED, CONSTRAINT table_1_check_a_positive CHECK ((a > 0))) PARTITION BY RANGE (a) ; ALTER TABLE tenant_9.table_1 OWNER TO postgres; ALTER TABLE tenant_9.table_1 ADD CONSTRAINT table_1_pkey PRIMARY KEY (a, b); ALTER TABLE tenant_9.table_1 ADD CONSTRAINT table_1_unique_b UNIQUE (b, a)
+(3 rows)
+
+CREATE TABLE tenant_9.table_2 (
+ a serial,
+ b bigserial
+)
+WITH (autovacuum_enabled = false, fillfactor = 20);
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_2') AS ddl_events;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+ result
+---------------------------------------------------------------------
+ CREATE TABLE tenant_9.table_2 (a integer DEFAULT nextval('tenant_9.table_2_a_seq'::regclass) NOT NULL, b bigint DEFAULT nextval('tenant_9.table_2_b_seq'::regclass) NOT NULL) USING heap WITH (autovacuum_enabled='false', fillfactor='20'); ALTER TABLE tenant_9.table_2 OWNER TO postgres
+ CREATE TABLE tenant_9.table_2 (a integer DEFAULT worker_nextval('tenant_9.table_2_a_seq'::regclass) NOT NULL, b bigint DEFAULT nextval('tenant_9.table_2_b_seq'::regclass) NOT NULL) USING heap WITH (autovacuum_enabled='false', fillfactor='20'); ALTER TABLE tenant_9.table_2 OWNER TO postgres
+ CREATE TABLE tenant_9.table_2 (a integer DEFAULT worker_nextval('tenant_9.table_2_a_seq'::regclass) NOT NULL, b bigint DEFAULT nextval('tenant_9.table_2_b_seq'::regclass) NOT NULL) USING heap WITH (autovacuum_enabled='false', fillfactor='20'); ALTER TABLE tenant_9.table_2 OWNER TO postgres
+(3 rows)
+
+CREATE UNLOGGED TABLE tenant_9.table_3 (
+ a int,
+ b text STORAGE EXTERNAL COMPRESSION pglz,
+ c int generated always as (a * 2) stored,
+ d int generated always as (a * 3) stored,
+ e text CONSTRAINT table_3_e_check CHECK (length(e) < 25)
+);
+\c - - - :master_port
+CREATE UNIQUE INDEX new_index ON tenant_9.table_3 USING btree (a);
+SELECT result FROM run_command_on_all_nodes($Q$
+ SET citus.enable_ddl_propagation TO off;
+ CREATE FUNCTION fake_am_handler(internal)
+ RETURNS table_am_handler
+ AS 'citus'
+ LANGUAGE C;
+ CREATE ACCESS METHOD fake_am TYPE TABLE HANDLER fake_am_handler;
+ SET citus.enable_ddl_propagation TO on;
+$Q$);
+ result
+---------------------------------------------------------------------
+ SET
+ SET
+ SET
+(3 rows)
+
+-- Since Citus assumes access methods are part of an extension, manually make
+-- fake_am a member of the citus extension so that Citus' checks pass while
+-- distributing tables.
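+-- (ALTER EXTENSION ... ADD ACCESS METHOD, used below, records fake_am as a
+-- member of the citus extension through an extension dependency in pg_depend.)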
+SET client_min_messages TO WARNING; +ALTER EXTENSION citus ADD ACCESS METHOD fake_am; +SET client_min_messages TO NOTICE; +CREATE ROLE test_non_super_user; +ALTER ROLE test_non_super_user NOSUPERUSER; +CREATE ROLE rls_test_user_1 WITH LOGIN; +ALTER ROLE rls_test_user_1 NOSUPERUSER; +CREATE ROLE rls_test_user_2 WITH LOGIN; +ALTER ROLE rls_test_user_2 NOSUPERUSER; +CREATE TEXT SEARCH CONFIGURATION regular_schema.text_search_cfg (parser = default); +GRANT USAGE ON SCHEMA tenant_9 TO rls_test_user_1, rls_test_user_2; +\c - - - :worker_1_port +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2093000;$$); + result +--------------------------------------------------------------------- + ALTER SEQUENCE +(1 row) + +SET citus.shard_replication_factor TO 1; +SET search_path TO alter_table_add_column; +SET citus.enable_schema_based_sharding TO ON; +SET client_min_messages TO NOTICE; +ALTER TABLE tenant_9.table_3 SET LOGGED; +ALTER TABLE tenant_9.table_3 ALTER COLUMN b SET DATA TYPE varchar(100) USING b::varchar(100); +ALTER TABLE tenant_9.table_3 ALTER COLUMN e SET COMPRESSION pglz; +ALTER TABLE tenant_9.table_3 ADD UNIQUE USING INDEX new_index; +ALTER TABLE tenant_9.table_3 VALIDATE CONSTRAINT table_3_e_check; +ALTER TABLE tenant_9.table_3 ALTER COLUMN a SET NOT NULL; +ALTER TABLE tenant_9.table_3 REPLICA IDENTITY USING INDEX new_index; +CLUSTER tenant_9.table_3 USING new_index; +-- not supported but let's keep as negative tests for future coverage +ALTER TABLE tenant_9.table_3 ALTER COLUMN d DROP EXPRESSION; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +ALTER TABLE tenant_9.table_3 ALTER COLUMN c SET GENERATED BY DEFAULT RESTART WITH 500; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +ALTER TABLE tenant_9.table_3 ALTER COLUMN c DROP IDENTITY IF EXISTS; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +ALTER TABLE tenant_9.table_3 ALTER COLUMN a SET STATISTICS 50; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +ALTER TABLE tenant_9.table_3 ALTER COLUMN b SET STORAGE RESET; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. 
+ALTER TABLE tenant_9.table_3 ALTER COLUMN b SET STORAGE MAIN; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +ALTER TABLE tenant_9.table_3 CLUSTER ON new_index; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +CREATE TABLE tenant_9.table_4 (a int, b text); +ALTER TABLE tenant_9.table_4 SET UNLOGGED; +ALTER TABLE tenant_9.table_4 SET ACCESS METHOD fake_am; +ALTER TABLE tenant_9.table_4 OWNER TO test_non_super_user; +SET client_min_messages TO ERROR; +CREATE TABLE tenant_9.table_5 (a int, b text) USING fake_am; +SET client_min_messages TO NOTICE; +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA regular_schema_worker_1; +SET citus.enable_schema_based_sharding TO ON; +CREATE TABLE regular_schema_worker_1.local_table_1 ( + a int, + b text +); +CREATE STATISTICS ON a, b FROM regular_schema_worker_1.local_table_1; +CREATE INDEX text_search_idx ON regular_schema_worker_1.local_table_1 +USING gin (to_tsvector('regular_schema.text_search_cfg'::regconfig, (COALESCE(b, ''::character varying))::text)); +ALTER TABLE regular_schema_worker_1.local_table_1 SET SCHEMA tenant_9; +NOTICE: Moving local_table_1 into distributed schema tenant_9 +ALTER TABLE tenant_9.local_table_1 RENAME TO table_6; +-- we don't support yet but let's still keep it +ALTER TABLE tenant_9.table_6 ALTER COLUMN 2 SET STATISTICS 101; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA regular_schema_worker_2; +SET citus.enable_schema_based_sharding TO ON; +CREATE TABLE regular_schema_worker_2.local_table_2 (a int, tenant_id int); +INSERT INTO regular_schema_worker_2.local_table_2 SELECT i, 1 FROM generate_series(1, 5) AS i; +INSERT INTO regular_schema_worker_2.local_table_2 SELECT i, 2 FROM generate_series(6, 10) AS i; +CREATE POLICY local_table_2_select_policy ON regular_schema_worker_2.local_table_2 FOR SELECT TO rls_test_user_1, rls_test_user_2 USING (current_user = 'rls_test_user_' || tenant_id::text); +GRANT SELECT ON TABLE regular_schema_worker_2.local_table_2 TO rls_test_user_1, rls_test_user_2; +ALTER TABLE regular_schema_worker_2.local_table_2 SET SCHEMA tenant_9; +NOTICE: Moving local_table_2 into distributed schema tenant_9 +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$tenant_9.local_table_2$$) +ALTER TABLE tenant_9.local_table_2 RENAME TO table_7; +SET ROLE rls_test_user_1; +SELECT COUNT(*)=10 FROM tenant_9.table_7; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SET ROLE rls_test_user_2; +SELECT COUNT(*)=10 FROM tenant_9.table_7; + ?column? 
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SET ROLE postgres;
+ALTER TABLE tenant_9.table_7 ENABLE ROW LEVEL SECURITY;
+SET ROLE rls_test_user_1;
+SELECT COUNT(*)=5 FROM tenant_9.table_7;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SET ROLE rls_test_user_2;
+SELECT COUNT(*)=5 FROM tenant_9.table_7;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SET ROLE postgres;
+SET citus.enable_schema_based_sharding TO OFF;
+CREATE SCHEMA regular_schema_worker_3;
+SET citus.enable_schema_based_sharding TO ON;
+CREATE TABLE regular_schema_worker_3.local_table_3 (value int, tenant_id int);
+\c - - - :master_port
+CREATE FUNCTION regular_schema.local_table_3_increment_value_tf() RETURNS trigger AS $local_table_3_increment_value_tf$
+BEGIN
+ UPDATE tenant_9.table_8 SET value=value+1;
+ RETURN NEW;
+END;
+$local_table_3_increment_value_tf$ LANGUAGE plpgsql;
+CREATE FUNCTION regular_schema.local_table_3_notice_value_tf() RETURNS trigger AS $local_table_3_notice_value_tf$
+BEGIN
+ RAISE NOTICE 'New value is %', NEW.value;
+ RETURN NEW;
+END;
+$local_table_3_notice_value_tf$ LANGUAGE plpgsql;
+\c - - - :worker_1_port
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2093500;$$);
+ result
+---------------------------------------------------------------------
+ ALTER SEQUENCE
+(1 row)
+
+SET citus.shard_replication_factor TO 1;
+SET search_path TO alter_table_add_column;
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+CREATE TRIGGER local_table_3_insert_statement_trigger
+AFTER INSERT ON regular_schema_worker_3.local_table_3
+FOR EACH STATEMENT EXECUTE FUNCTION regular_schema.local_table_3_increment_value_tf();
+-- Disable this to make sure that we allow triggers on distributed-schema tables
+-- regardless of this setting, since we don't consider triggers on such tables
+-- unsafe.
+SET citus.enable_unsafe_triggers TO OFF;
+ALTER TABLE regular_schema_worker_3.local_table_3 SET SCHEMA tenant_9;
+NOTICE: Moving local_table_3 into distributed schema tenant_9
+ALTER TABLE tenant_9.local_table_3 RENAME TO table_8;
+INSERT INTO tenant_9.table_8 VALUES (1), (1);
+-- Show that the statement trigger fired exactly once: we should see two "2"s,
+-- not "1"s (which would mean the trigger didn't fire) and not "3"s (which
+-- would mean the trigger fired more than once).
+SELECT * FROM tenant_9.table_8;
+ value | tenant_id
+---------------------------------------------------------------------
+ 2 |
+ 2 |
+(2 rows)
+
+CREATE TRIGGER local_table_3_update_row_trigger
+AFTER UPDATE ON tenant_9.table_8
+FOR EACH ROW EXECUTE FUNCTION regular_schema.local_table_3_notice_value_tf();
+-- we want to hide the error message context because the node sending
+-- the notice might change from one run to another.
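+-- (The UPDATE below touches two rows, so the FOR EACH ROW trigger raises its
+-- NOTICE twice.)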
+\set VERBOSITY terse +UPDATE tenant_9.table_8 SET value=0; +NOTICE: New value is 0 +NOTICE: New value is 0 +\set VERBOSITY default +ALTER TABLE tenant_9.table_8 DISABLE TRIGGER local_table_3_update_row_trigger; +-- no notice should be raised +UPDATE tenant_9.table_8 SET value=0; +ALTER TABLE tenant_9.table_8 DISABLE TRIGGER ALL; +INSERT INTO tenant_9.table_8 VALUES (1), (1); +SELECT * FROM tenant_9.table_8 ORDER BY value; + value | tenant_id +--------------------------------------------------------------------- + 0 | + 0 | + 1 | + 1 | +(4 rows) + +ALTER TABLE tenant_9.table_8 ENABLE TRIGGER ALL; +ALTER TABLE tenant_9.table_8 DISABLE TRIGGER local_table_3_insert_statement_trigger; +TRUNCATE tenant_9.table_8; +INSERT INTO tenant_9.table_8 VALUES (2), (2); +-- we want to hide the error message context because the node sending +-- the notice might change from one run to another. +\set VERBOSITY terse +UPDATE tenant_9.table_8 SET value=5; +NOTICE: New value is 5 +NOTICE: New value is 5 +\set VERBOSITY default +SELECT * FROM tenant_9.table_8 ORDER BY value; + value | tenant_id +--------------------------------------------------------------------- + 5 | + 5 | +(2 rows) + +ALTER TRIGGER local_table_3_insert_statement_trigger ON tenant_9.table_8 RENAME TO local_table_3_insert_statement_trigger_renamed; +CREATE TRIGGER trigger_to_drop +AFTER UPDATE ON tenant_9.table_8 +FOR EACH ROW EXECUTE FUNCTION regular_schema.local_table_3_notice_value_tf(); +DROP TRIGGER trigger_to_drop ON tenant_9.table_8; +-- not supported at all +ALTER TRIGGER local_table_3_insert_statement_trigger_renamed ON tenant_9.table_8 DEPENDS ON EXTENSION citus; +ERROR: trigger "local_table_3_insert_statement_trigger_renamed" depends on an extension and this is not supported for distributed tables and local tables added to metadata +DETAIL: Triggers from extensions are expected to be created on the workers by the extension they depend on. +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA regular_schema_worker_4; +SET citus.enable_schema_based_sharding TO ON; +BEGIN; + -- Early in the transaction, force parallelization and make sure to use + -- remote connections even while accessing a local shard. 
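+    -- (In general, once a shard placement has been accessed over a remote
+    -- connection in a transaction, Citus avoids switching back to local
+    -- execution for the rest of that transaction, so the DDL below is also
+    -- exercised over remote connections.)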
+ SET citus.force_max_query_parallelization TO ON; + SET citus.enable_local_execution TO OFF; + SELECT SUM(id) FROM regular_schema.distributed_table; + sum +--------------------------------------------------------------------- + 500500 +(1 row) + + -- restore settings back + SET citus.force_max_query_parallelization TO OFF; + SET citus.enable_local_execution TO ON; + CREATE TABLE regular_schema_worker_4.local_table_4 (a int, b text, c int); + CREATE INDEX index_with_name_1 ON regular_schema_worker_4.local_table_4 USING btree (a) WITH (fillfactor = 90); + CREATE INDEX index_with_name_2 ON regular_schema_worker_4.local_table_4 USING btree (b); + ALTER TABLE regular_schema_worker_4.local_table_4 SET SCHEMA tenant_9; +NOTICE: Moving local_table_4 into distributed schema tenant_9 + ALTER TABLE tenant_9.local_table_4 RENAME TO table_9; + DROP INDEX tenant_9.index_with_name_2; + CREATE INDEX index_with_name_3 ON tenant_9.table_9 USING btree (b) WITH (fillfactor = 99); + CREATE INDEX ON tenant_9.table_9 USING btree (c); +COMMIT; +CREATE INDEX CONCURRENTLY ON tenant_9.table_9 USING btree (a, b); +REINDEX TABLE tenant_9.table_9; +REINDEX INDEX tenant_9.index_with_name_1; +REINDEX INDEX CONCURRENTLY tenant_9.index_with_name_3; +CREATE INDEX index_with_name_4 ON tenant_9.table_9 USING btree (a DESC); +DROP INDEX CONCURRENTLY tenant_9.index_with_name_4; +CREATE INDEX index_with_name_5 ON tenant_9.table_9 USING btree ((a + b::int) DESC) WITH (fillfactor = 60); +ALTER INDEX tenant_9.index_with_name_1 RENAME TO index_with_name_1_renamed; +ALTER INDEX tenant_9.index_with_name_3 RESET (fillfactor); +ALTER INDEX tenant_9.index_with_name_5 SET (fillfactor = 80); +ALTER INDEX tenant_9.index_with_name_5 ALTER COLUMN 1 SET STATISTICS 4646; +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA regular_schema_worker_5; +SET citus.enable_schema_based_sharding TO ON; +CREATE TABLE regular_schema_worker_5.local_table_5 (a int, b text, c int); +CREATE TABLE regular_schema_worker_5.local_table_6 (a int, b text, c int); +CREATE VIEW regular_schema_worker_5.table_10_view_1 AS +SELECT a FROM regular_schema_worker_5.local_table_5 +UNION +SELECT a FROM regular_schema_worker_5.local_table_6; +WARNING: "view regular_schema_worker_5.table_10_view_1" has dependency to "table regular_schema_worker_5.local_table_6" that is not in Citus' metadata +DETAIL: "view regular_schema_worker_5.table_10_view_1" will be created only locally +HINT: Distribute "table regular_schema_worker_5.local_table_6" first to distribute "view regular_schema_worker_5.table_10_view_1" +ALTER TABLE regular_schema_worker_5.local_table_5 SET SCHEMA tenant_9; +NOTICE: Moving local_table_5 into distributed schema tenant_9 +ALTER TABLE regular_schema_worker_5.local_table_6 SET SCHEMA tenant_9; +NOTICE: Moving local_table_6 into distributed schema tenant_9 +ALTER TABLE tenant_9.local_table_5 RENAME TO table_10; +ALTER TABLE tenant_9.local_table_6 RENAME TO table_11; +CREATE VIEW tenant_9.table_10_view_2 AS SELECT b FROM tenant_9.table_10; +CREATE VIEW tenant_9.table_10_view_3 AS SELECT c FROM tenant_9.table_11; +DROP VIEW tenant_9.table_10_view_2; +ALTER VIEW tenant_9.table_10_view_3 RENAME TO table_10_view_3_renamed; +ALTER VIEW regular_schema_worker_5.table_10_view_1 SET SCHEMA tenant_9; +ALTER VIEW tenant_9.table_10_view_1 OWNER TO test_non_super_user; +-- check distributed views on all nodes +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT jsonb_agg( + jsonb_build_object ( + 'schemaname', n.nspname, + 'viewname', c.relname + ) + ORDER BY 
n.nspname, c.relname
+)
+FROM pg_class c
+LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
+WHERE c.relkind = 'v' AND n.nspname IN ('regular_schema_worker_5', 'tenant_9') AND c.relname LIKE 'table_10_view_%';
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+ result
+---------------------------------------------------------------------
+ [{"viewname": "table_10_view_1", "schemaname": "tenant_9"}, {"viewname": "table_10_view_3_renamed", "schemaname": "tenant_9"}]
+ [{"viewname": "table_10_view_1", "schemaname": "tenant_9"}, {"viewname": "table_10_view_3_renamed", "schemaname": "tenant_9"}]
+ [{"viewname": "table_10_view_1", "schemaname": "tenant_9"}, {"viewname": "table_10_view_3_renamed", "schemaname": "tenant_9"}]
+(3 rows)
+
+-- check the owner of tenant_9.table_10_view_1 on all nodes
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT pg_get_userbyid(c.relowner)
+FROM pg_class c
+LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
+WHERE c.relkind = 'v' AND n.nspname = 'tenant_9' AND c.relname = 'table_10_view_1';
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+ result
+---------------------------------------------------------------------
+ test_non_super_user
+ test_non_super_user
+ test_non_super_user
+(3 rows)
+
+-- make sure that the shell table definition is the same on all nodes
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_3') AS ddl_events;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+ result
+---------------------------------------------------------------------
+ CREATE TABLE tenant_9.table_3 (a integer NOT NULL, b character varying(100), c integer GENERATED ALWAYS AS ((a * 2)) STORED, d integer GENERATED ALWAYS AS ((a * 3)) STORED, e text COMPRESSION pglz, CONSTRAINT table_3_e_check CHECK ((length(e) < 25))) USING heap; ALTER TABLE tenant_9.table_3 OWNER TO postgres; ALTER TABLE tenant_9.table_3 ADD CONSTRAINT new_index UNIQUE (a); ALTER TABLE tenant_9.table_3 CLUSTER ON new_index; ALTER TABLE tenant_9.table_3 REPLICA IDENTITY USING INDEX new_index
+ CREATE TABLE tenant_9.table_3 (a integer NOT NULL, b character varying(100), c integer GENERATED ALWAYS AS ((a * 2)) STORED, d integer GENERATED ALWAYS AS ((a * 3)) STORED, e text COMPRESSION pglz, CONSTRAINT table_3_e_check CHECK ((length(e) < 25))) USING heap; ALTER TABLE tenant_9.table_3 OWNER TO postgres; ALTER TABLE tenant_9.table_3 ADD CONSTRAINT new_index UNIQUE (a); ALTER TABLE tenant_9.table_3 CLUSTER ON new_index; ALTER TABLE tenant_9.table_3 REPLICA IDENTITY USING INDEX new_index
+ CREATE TABLE tenant_9.table_3 (a integer NOT NULL, b character varying(100), c integer GENERATED ALWAYS AS ((a * 2)) STORED, d integer GENERATED ALWAYS AS ((a * 3)) STORED, e text COMPRESSION pglz, CONSTRAINT table_3_e_check CHECK ((length(e) < 25))) USING heap; ALTER TABLE tenant_9.table_3 OWNER TO postgres; ALTER TABLE tenant_9.table_3 ADD CONSTRAINT new_index UNIQUE (a); ALTER TABLE tenant_9.table_3 CLUSTER ON new_index; ALTER TABLE tenant_9.table_3 REPLICA IDENTITY USING INDEX new_index
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_4') AS ddl_events;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+ result
+---------------------------------------------------------------------
+ CREATE UNLOGGED TABLE tenant_9.table_4 (a integer, b text) USING fake_am; ALTER TABLE tenant_9.table_4 OWNER TO test_non_super_user
+ CREATE UNLOGGED TABLE tenant_9.table_4 (a integer, b text) USING fake_am; ALTER TABLE tenant_9.table_4 OWNER TO test_non_super_user
+ CREATE UNLOGGED TABLE tenant_9.table_4 (a integer, b text) USING fake_am; ALTER TABLE tenant_9.table_4 OWNER TO test_non_super_user
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_5') AS ddl_events;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+ result
+---------------------------------------------------------------------
+ CREATE TABLE tenant_9.table_5 (a integer, b text) USING fake_am; ALTER TABLE tenant_9.table_5 OWNER TO postgres
+ CREATE TABLE tenant_9.table_5 (a integer, b text) USING fake_am; ALTER TABLE tenant_9.table_5 OWNER TO postgres
+ CREATE TABLE tenant_9.table_5 (a integer, b text) USING fake_am; ALTER TABLE tenant_9.table_5 OWNER TO postgres
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_6') AS ddl_events;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+ result
+---------------------------------------------------------------------
+ CREATE TABLE tenant_9.table_6 (a integer, b text) USING heap; ALTER TABLE tenant_9.table_6 OWNER TO postgres; CREATE INDEX text_search_idx ON tenant_9.table_6 USING gin (to_tsvector('regular_schema.text_search_cfg'::regconfig, COALESCE(b, (''::character varying)::text))); CREATE STATISTICS regular_schema_worker_1.local_table_1_a_b_stat ON a, b FROM tenant_9.table_6
+ CREATE TABLE tenant_9.table_6 (a integer, b text) USING heap; ALTER TABLE tenant_9.table_6 OWNER TO postgres; CREATE INDEX text_search_idx ON tenant_9.table_6 USING gin (to_tsvector('regular_schema.text_search_cfg'::regconfig, COALESCE(b, (''::character varying)::text))); CREATE STATISTICS regular_schema_worker_1.local_table_1_a_b_stat ON a, b FROM tenant_9.table_6
+ CREATE TABLE tenant_9.table_6 (a integer, b text) USING heap; ALTER TABLE tenant_9.table_6 OWNER TO postgres; CREATE INDEX text_search_idx ON tenant_9.table_6 USING gin (to_tsvector('regular_schema.text_search_cfg'::regconfig, COALESCE(b, (''::character varying)::text))); CREATE STATISTICS regular_schema_worker_1.local_table_1_a_b_stat ON a, b FROM tenant_9.table_6
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT replace(
+    string_agg(ddl_events, '; '),
+    -- to avoid adding another test output for PG < 17, replace this with an empty string
+    ' GRANT MAINTAIN ON tenant_9.table_7 TO postgres;',
+    ''
+) FROM master_get_table_ddl_events('tenant_9.table_7') AS ddl_events;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+ result
+---------------------------------------------------------------------
+ CREATE TABLE tenant_9.table_7 (a integer, tenant_id integer) USING heap; REVOKE ALL ON tenant_9.table_7 FROM PUBLIC; GRANT INSERT ON tenant_9.table_7 TO postgres; GRANT SELECT ON tenant_9.table_7 TO postgres; GRANT UPDATE ON tenant_9.table_7 TO postgres; GRANT DELETE ON tenant_9.table_7 TO postgres; GRANT TRUNCATE ON tenant_9.table_7 TO postgres; GRANT REFERENCES ON tenant_9.table_7 TO postgres; GRANT TRIGGER ON tenant_9.table_7 TO postgres; GRANT SELECT ON tenant_9.table_7 TO rls_test_user_1; GRANT SELECT ON tenant_9.table_7 TO rls_test_user_2; ALTER TABLE tenant_9.table_7 OWNER TO postgres; ALTER TABLE tenant_9.table_7 ENABLE ROW LEVEL SECURITY; CREATE POLICY local_table_2_select_policy ON tenant_9.table_7 FOR SELECT TO
rls_test_user_1, rls_test_user_2 USING ((CURRENT_USER = ('rls_test_user_'::text || (tenant_id)::text))) + CREATE TABLE tenant_9.table_7 (a integer, tenant_id integer) USING heap; REVOKE ALL ON tenant_9.table_7 FROM PUBLIC; GRANT INSERT ON tenant_9.table_7 TO postgres; GRANT SELECT ON tenant_9.table_7 TO postgres; GRANT UPDATE ON tenant_9.table_7 TO postgres; GRANT DELETE ON tenant_9.table_7 TO postgres; GRANT TRUNCATE ON tenant_9.table_7 TO postgres; GRANT REFERENCES ON tenant_9.table_7 TO postgres; GRANT TRIGGER ON tenant_9.table_7 TO postgres; GRANT SELECT ON tenant_9.table_7 TO rls_test_user_1; GRANT SELECT ON tenant_9.table_7 TO rls_test_user_2; ALTER TABLE tenant_9.table_7 OWNER TO postgres; ALTER TABLE tenant_9.table_7 ENABLE ROW LEVEL SECURITY; CREATE POLICY local_table_2_select_policy ON tenant_9.table_7 FOR SELECT TO rls_test_user_1, rls_test_user_2 USING ((CURRENT_USER = ('rls_test_user_'::text || (tenant_id)::text))) + CREATE TABLE tenant_9.table_7 (a integer, tenant_id integer) USING heap; REVOKE ALL ON tenant_9.table_7 FROM PUBLIC; GRANT INSERT ON tenant_9.table_7 TO postgres; GRANT SELECT ON tenant_9.table_7 TO postgres; GRANT UPDATE ON tenant_9.table_7 TO postgres; GRANT DELETE ON tenant_9.table_7 TO postgres; GRANT TRUNCATE ON tenant_9.table_7 TO postgres; GRANT REFERENCES ON tenant_9.table_7 TO postgres; GRANT TRIGGER ON tenant_9.table_7 TO postgres; GRANT SELECT ON tenant_9.table_7 TO rls_test_user_1; GRANT SELECT ON tenant_9.table_7 TO rls_test_user_2; ALTER TABLE tenant_9.table_7 OWNER TO postgres; ALTER TABLE tenant_9.table_7 ENABLE ROW LEVEL SECURITY; CREATE POLICY local_table_2_select_policy ON tenant_9.table_7 FOR SELECT TO rls_test_user_1, rls_test_user_2 USING ((CURRENT_USER = ('rls_test_user_'::text || (tenant_id)::text))) +(3 rows) + +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_8') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + CREATE TABLE tenant_9.table_8 (value integer, tenant_id integer) USING heap; ALTER TABLE tenant_9.table_8 OWNER TO postgres; CREATE TRIGGER local_table_3_insert_statement_trigger_renamed AFTER INSERT ON tenant_9.table_8 FOR EACH STATEMENT EXECUTE FUNCTION regular_schema.local_table_3_increment_value_tf(); ALTER TABLE tenant_9.table_8 DISABLE TRIGGER local_table_3_insert_statement_trigger_renamed;; CREATE TRIGGER local_table_3_update_row_trigger AFTER UPDATE ON tenant_9.table_8 FOR EACH ROW EXECUTE FUNCTION regular_schema.local_table_3_notice_value_tf(); ALTER TABLE tenant_9.table_8 ENABLE TRIGGER local_table_3_update_row_trigger; + CREATE TABLE tenant_9.table_8 (value integer, tenant_id integer) USING heap; ALTER TABLE tenant_9.table_8 OWNER TO postgres; CREATE TRIGGER local_table_3_insert_statement_trigger_renamed AFTER INSERT ON tenant_9.table_8 FOR EACH STATEMENT EXECUTE FUNCTION regular_schema.local_table_3_increment_value_tf(); ALTER TABLE tenant_9.table_8 DISABLE TRIGGER local_table_3_insert_statement_trigger_renamed;; CREATE TRIGGER local_table_3_update_row_trigger AFTER UPDATE ON tenant_9.table_8 FOR EACH ROW EXECUTE FUNCTION regular_schema.local_table_3_notice_value_tf(); ALTER TABLE tenant_9.table_8 ENABLE TRIGGER local_table_3_update_row_trigger; + CREATE TABLE tenant_9.table_8 (value integer, tenant_id integer) USING heap; ALTER TABLE tenant_9.table_8 OWNER TO postgres; CREATE TRIGGER local_table_3_insert_statement_trigger_renamed 
AFTER INSERT ON tenant_9.table_8 FOR EACH STATEMENT EXECUTE FUNCTION regular_schema.local_table_3_increment_value_tf(); ALTER TABLE tenant_9.table_8 DISABLE TRIGGER local_table_3_insert_statement_trigger_renamed;; CREATE TRIGGER local_table_3_update_row_trigger AFTER UPDATE ON tenant_9.table_8 FOR EACH ROW EXECUTE FUNCTION regular_schema.local_table_3_notice_value_tf(); ALTER TABLE tenant_9.table_8 ENABLE TRIGGER local_table_3_update_row_trigger; +(3 rows) + +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_9') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + CREATE TABLE tenant_9.table_9 (a integer, b text, c integer) USING heap; ALTER TABLE tenant_9.table_9 OWNER TO postgres; CREATE INDEX index_with_name_1_renamed ON tenant_9.table_9 USING btree (a) WITH (fillfactor='90'); CREATE INDEX table_9_c_idx ON tenant_9.table_9 USING btree (c); CREATE INDEX table_9_a_b_idx ON tenant_9.table_9 USING btree (a, b); CREATE INDEX index_with_name_3 ON tenant_9.table_9 USING btree (b); CREATE INDEX index_with_name_5 ON tenant_9.table_9 USING btree (((a + (b)::integer)) DESC) WITH (fillfactor='80'); ALTER INDEX tenant_9.index_with_name_5 ALTER COLUMN 1 SET STATISTICS 4646 + CREATE TABLE tenant_9.table_9 (a integer, b text, c integer) USING heap; ALTER TABLE tenant_9.table_9 OWNER TO postgres; CREATE INDEX index_with_name_1_renamed ON tenant_9.table_9 USING btree (a) WITH (fillfactor='90'); CREATE INDEX table_9_c_idx ON tenant_9.table_9 USING btree (c); CREATE INDEX table_9_a_b_idx ON tenant_9.table_9 USING btree (a, b); CREATE INDEX index_with_name_3 ON tenant_9.table_9 USING btree (b); CREATE INDEX index_with_name_5 ON tenant_9.table_9 USING btree (((a + (b)::integer)) DESC) WITH (fillfactor='80'); ALTER INDEX tenant_9.index_with_name_5 ALTER COLUMN 1 SET STATISTICS 4646 + CREATE TABLE tenant_9.table_9 (a integer, b text, c integer) USING heap; ALTER TABLE tenant_9.table_9 OWNER TO postgres; CREATE INDEX index_with_name_1_renamed ON tenant_9.table_9 USING btree (a) WITH (fillfactor='90'); CREATE INDEX table_9_c_idx ON tenant_9.table_9 USING btree (c); CREATE INDEX table_9_a_b_idx ON tenant_9.table_9 USING btree (a, b); CREATE INDEX index_with_name_3 ON tenant_9.table_9 USING btree (b); CREATE INDEX index_with_name_5 ON tenant_9.table_9 USING btree (((a + (b)::integer)) DESC) WITH (fillfactor='80'); ALTER INDEX tenant_9.index_with_name_5 ALTER COLUMN 1 SET STATISTICS 4646 +(3 rows) + +-- Considering the various ways a table can use sequences, test if we +-- properly adjust sequence min / max values on all worker nodes, +-- including the current one, as well as testing if we sync last_value +-- to the coordinator. 
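+-- (A note on the expected values below: for bigint sequences each node appears
+-- to get a disjoint range derived from its group ID, roughly
+-- min_value = groupid * 2^48 + 1; e.g. 33 * 2^48 + 1 = 9288674231451649, which
+-- matches one worker's min_value, so nextval() cannot collide across nodes.
+-- int and smallint sequences are too narrow for that offset, which is why the
+-- worker-side column defaults for them are rewritten to worker_nextval()
+-- further down.)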
+SET citus.enable_schema_based_sharding TO OFF;
+CREATE SCHEMA initially_local_schema_seq_test_with_initial_data;
+SET citus.enable_schema_based_sharding TO ON;
+-- schema to move tables under initially_local_schema_seq_test_with_initial_data into
+CREATE SCHEMA dist_schema_seq_test_with_initial_data;
+CREATE SCHEMA dist_schema_seq_test_without_initial_data;
+-- create sequences and a table under initially_local_schema_seq_test_with_initial_data, and move the table to dist_schema_seq_test_with_initial_data
+CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.bigint_col_bigint_sequence AS bigint;
+CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.bigint_col_int_sequence AS int;
+CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.int_col_bigint_sequence AS bigint;
+CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.int_col_int_sequence AS int;
+CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence AS smallint;
+-- also create some sequences with custom settings
+CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence AS bigint MINVALUE 1000 MAXVALUE 1000000 START WITH 5000 INCREMENT 100;
+CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence AS int MINVALUE 100 MAXVALUE 2000000 START WITH 1000 INCREMENT 5;
+CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence AS smallint MINVALUE 10 MAXVALUE 1000 START WITH 50 INCREMENT 15;
+CREATE TABLE initially_local_schema_seq_test_with_initial_data.nextval_test (
+    id int,
+    column_to_drop_8 text,
+    bigint_col_with_bigint_sequence bigint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.bigint_col_bigint_sequence'::regclass),
+    column_to_drop_1 text,
+    bigint_col_with_int_sequence bigint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.bigint_col_int_sequence'::regclass),
+    column_to_drop_6 text,
+    int_col_with_bigint_sequence int DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.int_col_bigint_sequence'::regclass),
+    column_to_drop_2 text,
+    column_to_drop_3 text,
+    column_to_drop_5 text,
+    int_col_with_int_sequence int DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.int_col_int_sequence'::regclass),
+    column_to_drop_4 text,
+    column_to_drop_7 text,
+    smallint_col_with_smallint_sequence smallint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence'::regclass),
+    bigint_col_with_custom_bigint_sequence bigint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence'::regclass),
+    int_col_with_custom_int_sequence int DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence'::regclass),
+    smallint_col_with_custom_smallint_sequence smallint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence'::regclass)
+);
+-- Mark some of the sequences as owned by the columns.
+-- Note that marking a sequence as owned by a table column causes the sequence
+-- to be automatically moved to the same schema as the table when the table is
+-- moved to another schema.
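+-- (Concretely, after the SET SCHEMA below we expect the three owned sequences
+-- to follow nextval_test into dist_schema_seq_test_with_initial_data, the
+-- unowned ones to stay behind, and dropping the table at the end to drop only
+-- the owned sequences.)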
+ALTER SEQUENCE initially_local_schema_seq_test_with_initial_data.bigint_col_bigint_sequence OWNED BY initially_local_schema_seq_test_with_initial_data.nextval_test.bigint_col_with_bigint_sequence;
+ALTER SEQUENCE initially_local_schema_seq_test_with_initial_data.bigint_col_int_sequence OWNED BY initially_local_schema_seq_test_with_initial_data.nextval_test.bigint_col_with_int_sequence;
+ALTER SEQUENCE initially_local_schema_seq_test_with_initial_data.int_col_bigint_sequence OWNED BY initially_local_schema_seq_test_with_initial_data.nextval_test.int_col_with_bigint_sequence;
+INSERT INTO initially_local_schema_seq_test_with_initial_data.nextval_test (id) SELECT i FROM generate_series(1, 5) AS i;
+ALTER TABLE initially_local_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_1;
+ALTER TABLE initially_local_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_2;
+ALTER TABLE initially_local_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_3;
+ALTER TABLE initially_local_schema_seq_test_with_initial_data.nextval_test SET SCHEMA dist_schema_seq_test_with_initial_data;
+NOTICE: Moving nextval_test into distributed schema dist_schema_seq_test_with_initial_data
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$dist_schema_seq_test_with_initial_data.nextval_test$$)
+ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_4;
+CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence AS bigint;
+CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence AS int;
+CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence AS bigint;
+CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_int_col_int_sequence AS int;
+CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence AS smallint;
+-- also create some sequences with custom settings
+CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence AS bigint MINVALUE 1050 MAXVALUE 1000050 START WITH 5050 INCREMENT 150;
+CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence AS int MINVALUE 150 MAXVALUE 2000050 START WITH 1000 INCREMENT 55;
+CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence AS smallint MINVALUE 60 MAXVALUE 1050 START WITH 100 INCREMENT 65;
+-- all fail because the table is not empty
+ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence'::regclass);
+ERROR: cannot add a column involving DEFAULT nextval('..') because the table is not empty
+HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint
+Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..')
+ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence'::regclass);
+ERROR: cannot add a column involving DEFAULT nextval('..') because the table is not empty
+HINT: You can first call ALTER TABLE ..
ADD COLUMN .. smallint/int/bigint +Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..') +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_bigint_sequence int DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence'::regclass); +ERROR: cannot add a column involving DEFAULT nextval('..') because the table is not empty +HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint +Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..') +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_int_sequence int DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_int_sequence'::regclass); +ERROR: cannot add a column involving DEFAULT nextval('..') because the table is not empty +HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint +Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..') +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_smallint_col_with_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence'::regclass); +ERROR: cannot add a column involving DEFAULT nextval('..') because the table is not empty +HINT: You can first call ALTER TABLE .. ADD COLUMN .. smallint/int/bigint +Then set the default by ALTER TABLE .. ALTER COLUMN .. SET DEFAULT nextval('..') +-- so let's add the columns first, then alter their column default expressions +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_bigint_sequence bigint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_bigint_col_with_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_5; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_int_sequence bigint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_bigint_col_with_int_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_6; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_bigint_sequence int; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_int_col_with_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_7; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_int_sequence int; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_int_col_with_int_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_8; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN column_to_drop_9 text; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_9; +ALTER TABLE 
dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_smallint_col_with_smallint_sequence smallint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_smallint_col_with_smallint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_custom_bigint_sequence bigint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_bigint_col_with_custom_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_custom_int_sequence int; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_int_col_with_custom_int_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_smallint_col_with_custom_smallint_sequence smallint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_smallint_col_with_custom_smallint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence'::regclass); +-- Check nextval sequences. +-- bigint_col_int_sequence and added_bigint_col_int_sequence should become bigint sequences, see EnsureDistributedSequencesHaveOneType() +SELECT result FROM run_command_on_all_nodes( +$$ +WITH sequence_info AS ( + SELECT name::regclass::text, (get_sequence_info(name::regclass)).* + FROM UNNEST(ARRAY[ + 'dist_schema_seq_test_with_initial_data.bigint_col_bigint_sequence', + 'dist_schema_seq_test_with_initial_data.bigint_col_int_sequence', + 'dist_schema_seq_test_with_initial_data.int_col_bigint_sequence', + 'initially_local_schema_seq_test_with_initial_data.int_col_int_sequence', + 'initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence', + 'dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence', + 'dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence', + 'dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence', + 'dist_schema_seq_test_with_initial_data.added_int_col_int_sequence', + 'dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence', + 'initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence', + 'initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence', + 'initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence', + 'dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence', + 'dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence', + 'dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence' + ]) AS qualified_sequence_name(name) +) +SELECT jsonb_agg( + jsonb_build_object( + 'name', name, + 'type_name', type_name, + 'start_value', start_value, + 'last_value', last_value, + 'min_value', min_value, + 'max_value', max_value + ) + ORDER BY name +) +FROM sequence_info; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + [{"name": "dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence", "max_value": 
9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence", "max_value": 1000050, "min_value": 1050, "type_name": "bigint", "last_value": 5050, "start_value": 5050}, {"name": "dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence", "max_value": 2000050, "min_value": 150, "type_name": "integer", "last_value": 1000, "start_value": 1000}, {"name": "dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence", "max_value": 1050, "min_value": 60, "type_name": "smallint", "last_value": 100, "start_value": 100}, {"name": "dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.added_int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.bigint_col_bigint_sequence", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 5, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.bigint_col_int_sequence", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 5, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 5, "start_value": 1}, {"name": "initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence", "max_value": 1000000, "min_value": 1000, "type_name": "bigint", "last_value": 5400, "start_value": 5000}, {"name": "initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence", "max_value": 2000000, "min_value": 100, "type_name": "integer", "last_value": 1020, "start_value": 1000}, {"name": "initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence", "max_value": 1000, "min_value": 10, "type_name": "smallint", "last_value": 110, "start_value": 50}, {"name": "initially_local_schema_seq_test_with_initial_data.int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 5, "start_value": 1}, {"name": "initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 5, "start_value": 1}] + [{"name": "dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence", "max_value": 9570149208162305, "min_value": 
9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence", "max_value": 2000050, "min_value": 150, "type_name": "integer", "last_value": 2000050, "start_value": 1000}, {"name": "dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence", "max_value": 1050, "min_value": 60, "type_name": "smallint", "last_value": 1050, "start_value": 100}, {"name": "dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.added_int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.bigint_col_bigint_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_with_initial_data.bigint_col_int_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_with_initial_data.int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence", "max_value": 2000000, "min_value": 100, "type_name": "integer", "last_value": 2000000, "start_value": 1000}, {"name": "initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence", "max_value": 1000, "min_value": 10, "type_name": "smallint", "last_value": 1000, "start_value": 50}, {"name": "initially_local_schema_seq_test_with_initial_data.int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}] + [{"name": "dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence", 
"max_value": 2000050, "min_value": 150, "type_name": "integer", "last_value": 2000050, "start_value": 1000}, {"name": "dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence", "max_value": 1050, "min_value": 60, "type_name": "smallint", "last_value": 1050, "start_value": 100}, {"name": "dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.added_int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.bigint_col_bigint_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_with_initial_data.bigint_col_int_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_with_initial_data.int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence", "max_value": 2000000, "min_value": 100, "type_name": "integer", "last_value": 2000000, "start_value": 1000}, {"name": "initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence", "max_value": 1000, "min_value": 10, "type_name": "smallint", "last_value": 1000, "start_value": 50}, {"name": "initially_local_schema_seq_test_with_initial_data.int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}] +(3 rows) + +-- check nextval calls used in table definition +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('dist_schema_seq_test_with_initial_data.nextval_test') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + CREATE TABLE dist_schema_seq_test_with_initial_data.nextval_test (id integer, bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.bigint_col_bigint_sequence'::regclass), bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.bigint_col_int_sequence'::regclass), int_col_with_bigint_sequence integer DEFAULT nextval('dist_schema_seq_test_with_initial_data.int_col_bigint_sequence'::regclass), int_col_with_int_sequence integer DEFAULT 
nextval('initially_local_schema_seq_test_with_initial_data.int_col_int_sequence'::regclass), smallint_col_with_smallint_sequence smallint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence'::regclass), bigint_col_with_custom_bigint_sequence bigint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence'::regclass), int_col_with_custom_int_sequence integer DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence'::regclass), smallint_col_with_custom_smallint_sequence smallint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence'::regclass), added_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence'::regclass), added_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence'::regclass), added_int_col_with_bigint_sequence integer DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence'::regclass), added_int_col_with_int_sequence integer DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_int_sequence'::regclass), added_smallint_col_with_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence'::regclass), added_bigint_col_with_custom_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence'::regclass), added_int_col_with_custom_int_sequence integer DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence'::regclass), added_smallint_col_with_custom_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence'::regclass)) USING heap; ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test OWNER TO postgres + CREATE TABLE dist_schema_seq_test_with_initial_data.nextval_test (id integer, bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.bigint_col_bigint_sequence'::regclass), bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.bigint_col_int_sequence'::regclass), int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.int_col_bigint_sequence'::regclass), int_col_with_int_sequence integer DEFAULT worker_nextval('initially_local_schema_seq_test_with_initial_data.int_col_int_sequence'::regclass), smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence'::regclass), bigint_col_with_custom_bigint_sequence bigint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence'::regclass), int_col_with_custom_int_sequence integer DEFAULT worker_nextval('initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence'::regclass), smallint_col_with_custom_smallint_sequence smallint DEFAULT worker_nextval('initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence'::regclass), added_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence'::regclass), added_bigint_col_with_int_sequence bigint DEFAULT 
nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence'::regclass), added_int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence'::regclass), added_int_col_with_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.added_int_col_int_sequence'::regclass), added_smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence'::regclass), added_bigint_col_with_custom_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence'::regclass), added_int_col_with_custom_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence'::regclass), added_smallint_col_with_custom_smallint_sequence smallint DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence'::regclass)) USING heap; ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test OWNER TO postgres + CREATE TABLE dist_schema_seq_test_with_initial_data.nextval_test (id integer, bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.bigint_col_bigint_sequence'::regclass), bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.bigint_col_int_sequence'::regclass), int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.int_col_bigint_sequence'::regclass), int_col_with_int_sequence integer DEFAULT worker_nextval('initially_local_schema_seq_test_with_initial_data.int_col_int_sequence'::regclass), smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence'::regclass), bigint_col_with_custom_bigint_sequence bigint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence'::regclass), int_col_with_custom_int_sequence integer DEFAULT worker_nextval('initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence'::regclass), smallint_col_with_custom_smallint_sequence smallint DEFAULT worker_nextval('initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence'::regclass), added_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence'::regclass), added_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence'::regclass), added_int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence'::regclass), added_int_col_with_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.added_int_col_int_sequence'::regclass), added_smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence'::regclass), added_bigint_col_with_custom_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence'::regclass), added_int_col_with_custom_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence'::regclass), added_smallint_col_with_custom_smallint_sequence smallint DEFAULT 
worker_nextval('dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence'::regclass)) USING heap; ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test OWNER TO postgres +(3 rows) + +-- Should succeed on all nodes as we don't try inserting column default values +-- for the columns that are using int / smallint based sequences. +-- Doing so is okay from the coordinator but would cause an error on workers. +SELECT result FROM run_command_on_all_nodes( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (10, DEFAULT, DEFAULT, 1, 1, 1, DEFAULT, 1, 1, DEFAULT, DEFAULT, 1, 1, 1, DEFAULT, 1, 1) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$, + parallel => false +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + {"id": 10, "int_col_with_int_sequence": 1, "bigint_col_with_int_sequence": 6, "int_col_with_bigint_sequence": 1, "added_int_col_with_int_sequence": 1, "bigint_col_with_bigint_sequence": 6, "int_col_with_custom_int_sequence": 1, "added_bigint_col_with_int_sequence": 1, "added_int_col_with_bigint_sequence": 1, "smallint_col_with_smallint_sequence": 1, "added_bigint_col_with_bigint_sequence": 1, "added_int_col_with_custom_int_sequence": 1, "bigint_col_with_custom_bigint_sequence": 5500, "added_smallint_col_with_smallint_sequence": 1, "smallint_col_with_custom_smallint_sequence": 1, "added_bigint_col_with_custom_bigint_sequence": 5050, "added_smallint_col_with_custom_smallint_sequence": 1} + {"id": 10, "int_col_with_int_sequence": 1, "bigint_col_with_int_sequence": 9288674231451649, "int_col_with_bigint_sequence": 1, "added_int_col_with_int_sequence": 1, "bigint_col_with_bigint_sequence": 9288674231451649, "int_col_with_custom_int_sequence": 1, "added_bigint_col_with_int_sequence": 9288674231451649, "added_int_col_with_bigint_sequence": 1, "smallint_col_with_smallint_sequence": 1, "added_bigint_col_with_bigint_sequence": 9288674231451649, "added_int_col_with_custom_int_sequence": 1, "bigint_col_with_custom_bigint_sequence": 9288674231451649, "added_smallint_col_with_smallint_sequence": 1, "smallint_col_with_custom_smallint_sequence": 1, "added_bigint_col_with_custom_bigint_sequence": 9288674231451649, "added_smallint_col_with_custom_smallint_sequence": 1} + {"id": 10, "int_col_with_int_sequence": 1, "bigint_col_with_int_sequence": 13229323905400833, "int_col_with_bigint_sequence": 1, "added_int_col_with_int_sequence": 1, "bigint_col_with_bigint_sequence": 13229323905400833, "int_col_with_custom_int_sequence": 1, "added_bigint_col_with_int_sequence": 13229323905400833, "added_int_col_with_bigint_sequence": 1, "smallint_col_with_smallint_sequence": 1, "added_bigint_col_with_bigint_sequence": 13229323905400833, "added_int_col_with_custom_int_sequence": 1, "bigint_col_with_custom_bigint_sequence": 13229323905400833, "added_smallint_col_with_smallint_sequence": 1, "smallint_col_with_custom_smallint_sequence": 1, "added_bigint_col_with_custom_bigint_sequence": 13229323905400833, "added_smallint_col_with_custom_smallint_sequence": 1} +(3 rows) + +-- succeeds on the coordinator +SELECT result FROM run_command_on_coordinator( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (11, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$ +); + result 
+--------------------------------------------------------------------- + {"id": 11, "int_col_with_int_sequence": 6, "bigint_col_with_int_sequence": 7, "int_col_with_bigint_sequence": 6, "added_int_col_with_int_sequence": 1, "bigint_col_with_bigint_sequence": 7, "int_col_with_custom_int_sequence": 1025, "added_bigint_col_with_int_sequence": 2, "added_int_col_with_bigint_sequence": 1, "smallint_col_with_smallint_sequence": 6, "added_bigint_col_with_bigint_sequence": 2, "added_int_col_with_custom_int_sequence": 1000, "bigint_col_with_custom_bigint_sequence": 5600, "added_smallint_col_with_smallint_sequence": 1, "smallint_col_with_custom_smallint_sequence": 125, "added_bigint_col_with_custom_bigint_sequence": 5200, "added_smallint_col_with_custom_smallint_sequence": 100} +(1 row) + +-- all fail on workers +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(2 rows) + +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(2 rows) + +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (1, 1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(2 rows) + +SELECT * FROM dist_schema_seq_test_with_initial_data.nextval_test ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17; + id | bigint_col_with_bigint_sequence | bigint_col_with_int_sequence | int_col_with_bigint_sequence | int_col_with_int_sequence | smallint_col_with_smallint_sequence | bigint_col_with_custom_bigint_sequence | int_col_with_custom_int_sequence | smallint_col_with_custom_smallint_sequence | added_bigint_col_with_bigint_sequence | added_bigint_col_with_int_sequence | added_int_col_with_bigint_sequence | added_int_col_with_int_sequence | added_smallint_col_with_smallint_sequence | added_bigint_col_with_custom_bigint_sequence | added_int_col_with_custom_int_sequence | added_smallint_col_with_custom_smallint_sequence +--------------------------------------------------------------------- + 1 | 1 | 1 | 1 | 1 | 1 | 5000 | 1000 | 50 | | | | | | | | + 2 | 2 | 2 | 2 | 2 | 2 | 5100 | 1005 | 65 | | | | | | | | + 3 | 3 | 3 | 3 | 3 | 3 | 5200 | 1010 | 80 | | | | | | | | + 4 | 4 | 4 | 4 | 4 | 4 | 5300 | 1015 | 95 | | | | | | | | + 5 | 5 | 5 | 5 | 5 | 5 | 5400 | 1020 | 110 | | | | | | | | + 10 | 6 | 6 | 1 | 1 | 1 | 5500 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 5050 | 1 | 1 + 10 | 
9288674231451649 | 9288674231451649 | 1 | 1 | 1 | 9288674231451649 | 1 | 1 | 9288674231451649 | 9288674231451649 | 1 | 1 | 1 | 9288674231451649 | 1 | 1 + 10 | 13229323905400833 | 13229323905400833 | 1 | 1 | 1 | 13229323905400833 | 1 | 1 | 13229323905400833 | 13229323905400833 | 1 | 1 | 1 | 13229323905400833 | 1 | 1 + 11 | 7 | 7 | 6 | 6 | 6 | 5600 | 1025 | 125 | 2 | 2 | 1 | 1 | 1 | 5200 | 1000 | 100 +(9 rows) + +DROP TABLE dist_schema_seq_test_with_initial_data.nextval_test; +-- After dropping the table, make sure that only the sequences for which +-- we didn't execute "alter sequence ... owned by column" are left. +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT array_agg(sequencename ORDER BY sequencename) AS sequences +FROM pg_sequences +WHERE + schemaname IN ( + 'initially_local_schema_seq_test_with_initial_data', + 'dist_schema_seq_test_with_initial_data' + ) AND + sequencename IN ( + 'bigint_col_bigint_sequence', + 'bigint_col_int_sequence', + 'int_col_bigint_sequence', + 'int_col_int_sequence', + 'smallint_col_smallint_sequence', + 'added_bigint_col_bigint_sequence', + 'added_bigint_col_int_sequence', + 'added_int_col_bigint_sequence', + 'added_int_col_int_sequence', + 'added_smallint_col_smallint_sequence', + 'custom_bigint_col_bigint_sequence', + 'custom_int_col_int_sequence', + 'custom_smallint_col_smallint_sequence', + 'added_custom_bigint_col_bigint_sequence', + 'added_custom_int_col_int_sequence', + 'added_custom_smallint_col_smallint_sequence' + ); +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + {added_bigint_col_bigint_sequence,added_bigint_col_int_sequence,added_custom_bigint_col_bigint_sequence,added_custom_int_col_int_sequence,added_custom_smallint_col_smallint_sequence,added_int_col_bigint_sequence,added_int_col_int_sequence,added_smallint_col_smallint_sequence,custom_bigint_col_bigint_sequence,custom_int_col_int_sequence,custom_smallint_col_smallint_sequence,int_col_int_sequence,smallint_col_smallint_sequence} + {added_bigint_col_bigint_sequence,added_bigint_col_int_sequence,added_custom_bigint_col_bigint_sequence,added_custom_int_col_int_sequence,added_custom_smallint_col_smallint_sequence,added_int_col_bigint_sequence,added_int_col_int_sequence,added_smallint_col_smallint_sequence,custom_bigint_col_bigint_sequence,custom_int_col_int_sequence,custom_smallint_col_smallint_sequence,int_col_int_sequence,smallint_col_smallint_sequence} + {added_bigint_col_bigint_sequence,added_bigint_col_int_sequence,added_custom_bigint_col_bigint_sequence,added_custom_int_col_int_sequence,added_custom_smallint_col_smallint_sequence,added_int_col_bigint_sequence,added_int_col_int_sequence,added_smallint_col_smallint_sequence,custom_bigint_col_bigint_sequence,custom_int_col_int_sequence,custom_smallint_col_smallint_sequence,int_col_int_sequence,smallint_col_smallint_sequence} +(3 rows) + +-- create a table with built-in sequences under initially_local_schema_seq_test_with_initial_data, and move the table to dist_schema_seq_test_with_initial_data +CREATE TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test ( + id int, + column_to_drop_1 text, + smallserial_col smallserial, + column_to_drop_2 text, + column_to_drop_3 text, + serial_col serial, + column_to_drop_4 text, + bigserial_col bigserial, + column_to_drop_5 text, + generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY, + column_to_drop_6 text, + column_to_drop_7 text, + generated_int_col int GENERATED BY 
DEFAULT AS IDENTITY,
+ generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 178 INCREMENT BY 17 MINVALUE 100 MAXVALUE 1500600)
+);
+INSERT INTO initially_local_schema_seq_test_with_initial_data.built_in_seq_test (id) SELECT i FROM generate_series(1, 5) AS i;
+ALTER TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_1;
+ALTER TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_2;
+ALTER TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_4;
+ALTER TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_6;
+ALTER TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test SET SCHEMA dist_schema_seq_test_with_initial_data;
+NOTICE: Moving built_in_seq_test into distributed schema dist_schema_seq_test_with_initial_data
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$dist_schema_seq_test_with_initial_data.built_in_seq_test$$)
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_3;
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_5;
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_7;
+-- all fail: we cannot add serial-based columns or identity columns
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_smallserial_col smallserial;
+ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_serial_col serial;
+ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_bigserial_col bigserial;
+ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY;
+ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_int_col int GENERATED BY DEFAULT AS IDENTITY;
+ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY;
+ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers
+-- Also, we cannot add a column and later alter its type to a serial-based one. Postgres normally
+-- allows turning an existing column into an identity column later, but we don't support that either.
+ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY;
+ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers
+ALTER TABLE
dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_int_col int GENERATED BY DEFAULT AS IDENTITY; +ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY; +ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_smallint_col smallint; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ALTER COLUMN added_generated_smallint_col ADD GENERATED BY DEFAULT AS IDENTITY; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_int_col int; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ALTER COLUMN added_generated_int_col ADD GENERATED BY DEFAULT AS IDENTITY; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_bigint_col bigint; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ALTER COLUMN added_generated_bigint_col ADD GENERATED BY DEFAULT AS IDENTITY; +ERROR: alter table command is currently unsupported +DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported. 
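+-- Illustrative sketch, not captured test output: the pattern that does work, and
+-- that this file exercises later, is a plain bigint column whose DEFAULT calls
+-- nextval() on a bigint sequence. The sequence and column names below are hypothetical:
+--   CREATE SEQUENCE dist_schema_seq_test_with_initial_data.hypothetical_bigint_seq AS bigint;
+--   ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test
+--     ADD COLUMN hypothetical_bigint_col bigint
+--     DEFAULT nextval('dist_schema_seq_test_with_initial_data.hypothetical_bigint_seq'::regclass);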
+-- let's drop them too as they're not useful now +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN added_generated_smallint_col; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN added_generated_int_col; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN added_generated_bigint_col; +-- check built-in sequences +SELECT result FROM run_command_on_all_nodes( +$$ +WITH sequence_info AS ( + SELECT oid::regclass::text AS name, (get_sequence_info(oid)).* + FROM ( + SELECT objid + FROM pg_depend d + JOIN pg_class c ON c.oid = d.objid + WHERE d.refobjid = 'dist_schema_seq_test_with_initial_data.built_in_seq_test'::regclass AND + c.relkind = 'S' + ) sequence(oid) +) +SELECT jsonb_agg( + jsonb_build_object( + 'name', name, + 'type_name', type_name, + 'start_value', start_value, + 'last_value', last_value, + 'min_value', min_value, + 'max_value', max_value + ) + ORDER BY name +) +FROM sequence_info; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + [{"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_bigserial_col_seq", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 5, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_generated_bigint_col_seq", "max_value": 1500600, "min_value": 100, "type_name": "bigint", "last_value": 246, "start_value": 178}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_generated_int_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 5, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_generated_smallint_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 5, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_serial_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 5, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_smallserial_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 5, "start_value": 1}] + [{"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_bigserial_col_seq", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_generated_bigint_col_seq", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_generated_int_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_generated_smallint_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_serial_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_smallserial_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 
1}] + [{"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_bigserial_col_seq", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_generated_bigint_col_seq", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_generated_int_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_generated_smallint_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_serial_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_with_initial_data.built_in_seq_test_smallserial_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}] +(3 rows) + +-- check nextval calls used in table definition +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('dist_schema_seq_test_with_initial_data.built_in_seq_test') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + CREATE TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test (id integer, smallserial_col smallint DEFAULT nextval('dist_schema_seq_test_with_initial_data.built_in_seq_test_smallserial_col_seq'::regclass) NOT NULL, serial_col integer DEFAULT nextval('dist_schema_seq_test_with_initial_data.built_in_seq_test_serial_col_seq'::regclass) NOT NULL, bigserial_col bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.built_in_seq_test_bigserial_col_seq'::regclass) NOT NULL, generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 32767 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_int_col integer GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 17 MINVALUE 100 MAXVALUE 1500600 START WITH 178 CACHE 1 NO CYCLE) NOT NULL) USING heap; ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test OWNER TO postgres + CREATE TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test (id integer, smallserial_col smallint DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.built_in_seq_test_smallserial_col_seq'::regclass) NOT NULL, serial_col integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.built_in_seq_test_serial_col_seq'::regclass) NOT NULL, bigserial_col bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.built_in_seq_test_bigserial_col_seq'::regclass) NOT NULL, generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 32767 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_int_col integer GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_bigint_col bigint GENERATED BY 
DEFAULT AS IDENTITY (INCREMENT BY 17 MINVALUE 9288674231451649 MAXVALUE 9570149208162305 START WITH 9288674231451649 CACHE 1 NO CYCLE) NOT NULL) USING heap; ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test OWNER TO postgres + CREATE TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test (id integer, smallserial_col smallint DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.built_in_seq_test_smallserial_col_seq'::regclass) NOT NULL, serial_col integer DEFAULT worker_nextval('dist_schema_seq_test_with_initial_data.built_in_seq_test_serial_col_seq'::regclass) NOT NULL, bigserial_col bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.built_in_seq_test_bigserial_col_seq'::regclass) NOT NULL, generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 32767 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_int_col integer GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 17 MINVALUE 13229323905400833 MAXVALUE 13510798882111489 START WITH 13229323905400833 CACHE 1 NO CYCLE) NOT NULL) USING heap; ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test OWNER TO postgres +(3 rows) + +-- Should succeed on all nodes as we don't try inserting column default values +-- for the columns that are using int / smallint based sequences. +-- Doing so is okay from the coordinator but would cause an error on workers. +SELECT result FROM run_command_on_all_nodes( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (10, 1, 1, 1, 1, 1, DEFAULT) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$, + parallel => false +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + {"id": 10, "serial_col": 1, "bigserial_col": 1, "smallserial_col": 1, "generated_int_col": 1, "generated_bigint_col": 263, "generated_smallint_col": 1} + {"id": 10, "serial_col": 1, "bigserial_col": 1, "smallserial_col": 1, "generated_int_col": 1, "generated_bigint_col": 9288674231451649, "generated_smallint_col": 1} + {"id": 10, "serial_col": 1, "bigserial_col": 1, "smallserial_col": 1, "generated_int_col": 1, "generated_bigint_col": 13229323905400833, "generated_smallint_col": 1} +(3 rows) + +-- succeeds on the coordinator +SELECT result FROM run_command_on_coordinator( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (11, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$ +); + result +--------------------------------------------------------------------- + {"id": 11, "serial_col": 6, "bigserial_col": 6, "smallserial_col": 6, "generated_int_col": 6, "generated_bigint_col": 280, "generated_smallint_col": 6} +(1 row) + +-- all fail on workers +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (1, DEFAULT, 1, 1, 1, 1, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(2 rows) + +SELECT result FROM 
run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (1, 1, DEFAULT, 1, 1, 1, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(2 rows) + +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (1, 1, 1, 1, DEFAULT, 1, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval: reached maximum value of sequence "built_in_seq_test_generated_smallint_col_seq" (32767) + ERROR: nextval: reached maximum value of sequence "built_in_seq_test_generated_smallint_col_seq" (32767) +(2 rows) + +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (1, 1, 1, 1, 1, DEFAULT, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval: reached maximum value of sequence "built_in_seq_test_generated_int_col_seq" (2147483647) + ERROR: nextval: reached maximum value of sequence "built_in_seq_test_generated_int_col_seq" (2147483647) +(2 rows) + +SELECT * FROM dist_schema_seq_test_with_initial_data.built_in_seq_test ORDER BY 1, 2, 3, 4, 5, 6, 7; + id | smallserial_col | serial_col | bigserial_col | generated_smallint_col | generated_int_col | generated_bigint_col +--------------------------------------------------------------------- + 1 | 1 | 1 | 1 | 1 | 1 | 178 + 2 | 2 | 2 | 2 | 2 | 2 | 195 + 3 | 3 | 3 | 3 | 3 | 3 | 212 + 4 | 4 | 4 | 4 | 4 | 4 | 229 + 5 | 5 | 5 | 5 | 5 | 5 | 246 + 10 | 1 | 1 | 1 | 1 | 1 | 263 + 10 | 1 | 1 | 1 | 1 | 1 | 9288674231451649 + 10 | 1 | 1 | 1 | 1 | 1 | 13229323905400833 + 11 | 6 | 6 | 6 | 6 | 6 | 280 +(9 rows) + +-- create sequences and a table under dist_schema_seq_test_without_initial_data +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence AS bigint MINVALUE 1000 MAXVALUE 1000000 START WITH 5000 INCREMENT 100; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.bigint_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.int_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.int_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence AS smallint; +CREATE TABLE dist_schema_seq_test_without_initial_data.nextval_test ( + id int, + bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence'::regclass), + bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.bigint_col_int_sequence'::regclass), + int_col_with_bigint_sequence int DEFAULT nextval('dist_schema_seq_test_without_initial_data.int_col_bigint_sequence'::regclass), + int_col_with_int_sequence int DEFAULT nextval('dist_schema_seq_test_without_initial_data.int_col_int_sequence'::regclass), + smallint_col_with_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence'::regclass) +); +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence AS bigint; +CREATE SEQUENCE 
dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_int_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence AS smallint; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_int_col_with_bigint_sequence int DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_int_col_with_int_sequence int DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_int_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_smallint_col_with_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence'::regclass); +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence AS smallint; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_bigint_col_with_bigint_sequence bigint; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_bigint_col_with_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_bigint_col_with_int_sequence bigint; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_bigint_col_with_int_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_int_col_with_bigint_sequence int; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_int_col_with_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_int_col_with_int_sequence int; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_int_col_with_int_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence'::regclass); +ALTER TABLE 
dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_smallint_col_with_smallint_sequence smallint; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_smallint_col_with_smallint_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence'::regclass); +-- Check nextval sequences. +-- bigint_col_int_sequence, added_bigint_col_int_sequence and added_altered_bigint_col_int_sequence should become bigint sequences, see EnsureDistributedSequencesHaveOneType() +SELECT result FROM run_command_on_all_nodes( +$$ +WITH sequence_info AS ( + SELECT name::regclass::text, (get_sequence_info(name::regclass)).* + FROM UNNEST(ARRAY[ + 'dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.bigint_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.int_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.int_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence', + 'dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.added_int_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence' + ]) AS qualified_sequence_name(name) +) +SELECT jsonb_agg( + jsonb_build_object( + 'name', name, + 'type_name', type_name, + 'start_value', start_value, + 'last_value', last_value, + 'min_value', min_value, + 'max_value', max_value + ) + ORDER BY name +) +FROM sequence_info; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + [{"name": "dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 1, "start_value": 1}, {"name": 
"dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence", "max_value": 1000000, "min_value": 1000, "type_name": "bigint", "last_value": 5000, "start_value": 5000}, {"name": "dist_schema_seq_test_without_initial_data.bigint_col_int_sequence", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 1, "start_value": 1}] + [{"name": "dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": 
"integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_without_initial_data.bigint_col_int_sequence", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_without_initial_data.int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}] + [{"name": "dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence", "max_value": 13510798882111489, "min_value": 
13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_without_initial_data.bigint_col_int_sequence", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_without_initial_data.int_col_bigint_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.int_col_int_sequence", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}] +(3 rows) + +-- check nextval calls used in table definition +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('dist_schema_seq_test_without_initial_data.nextval_test') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + CREATE TABLE dist_schema_seq_test_without_initial_data.nextval_test (id integer, bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence'::regclass), bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.bigint_col_int_sequence'::regclass), int_col_with_bigint_sequence integer DEFAULT nextval('dist_schema_seq_test_without_initial_data.int_col_bigint_sequence'::regclass), int_col_with_int_sequence integer DEFAULT nextval('dist_schema_seq_test_without_initial_data.int_col_int_sequence'::regclass), smallint_col_with_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence'::regclass), added_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence'::regclass), added_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence'::regclass), added_int_col_with_bigint_sequence integer DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence'::regclass), added_int_col_with_int_sequence integer DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_int_col_int_sequence'::regclass), added_smallint_col_with_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence'::regclass), added_altered_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence'::regclass), added_altered_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence'::regclass), added_altered_int_col_with_bigint_sequence integer DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence'::regclass), added_altered_int_col_with_int_sequence integer DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence'::regclass), added_altered_smallint_col_with_smallint_sequence smallint DEFAULT 
nextval('dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence'::regclass)) USING heap; ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test OWNER TO postgres + CREATE TABLE dist_schema_seq_test_without_initial_data.nextval_test (id integer, bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence'::regclass), bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.bigint_col_int_sequence'::regclass), int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.int_col_bigint_sequence'::regclass), int_col_with_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.int_col_int_sequence'::regclass), smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence'::regclass), added_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence'::regclass), added_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence'::regclass), added_int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence'::regclass), added_int_col_with_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_int_col_int_sequence'::regclass), added_smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence'::regclass), added_altered_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence'::regclass), added_altered_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence'::regclass), added_altered_int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence'::regclass), added_altered_int_col_with_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence'::regclass), added_altered_smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence'::regclass)) USING heap; ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test OWNER TO postgres + CREATE TABLE dist_schema_seq_test_without_initial_data.nextval_test (id integer, bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence'::regclass), bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.bigint_col_int_sequence'::regclass), int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.int_col_bigint_sequence'::regclass), int_col_with_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.int_col_int_sequence'::regclass), smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence'::regclass), added_bigint_col_with_bigint_sequence bigint DEFAULT 
nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence'::regclass), added_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence'::regclass), added_int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence'::regclass), added_int_col_with_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_int_col_int_sequence'::regclass), added_smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence'::regclass), added_altered_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence'::regclass), added_altered_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence'::regclass), added_altered_int_col_with_bigint_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence'::regclass), added_altered_int_col_with_int_sequence integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence'::regclass), added_altered_smallint_col_with_smallint_sequence smallint DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence'::regclass)) USING heap; ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test OWNER TO postgres +(3 rows) + +-- Should succeed on all nodes as we don't try inserting column default values +-- for the columns that are using int / smallint based sequences. +-- Doing so is okay from the coordinator but would cause an error on workers. 
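+-- (Illustrative, not captured test output: as the deparsed DDL above shows, int and
+-- smallint sequence defaults become worker_nextval() on workers, which raises an error
+-- instead of returning a value that might overflow the column type. For example,
+--   SELECT worker_nextval('dist_schema_seq_test_without_initial_data.int_col_int_sequence'::regclass);
+-- on a worker is expected to fail with "nextval(sequence) calls in worker nodes are
+-- not supported for column defaults of type int or smallint".)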
+SELECT result FROM run_command_on_all_nodes( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (10, DEFAULT, DEFAULT, 1, 1, 1, DEFAULT, DEFAULT, 1, 1, 1, DEFAULT, DEFAULT, 1, 1, 1) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$, + parallel => false +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + {"id": 10, "int_col_with_int_sequence": 1, "bigint_col_with_int_sequence": 1, "int_col_with_bigint_sequence": 1, "added_int_col_with_int_sequence": 1, "bigint_col_with_bigint_sequence": 5000, "added_bigint_col_with_int_sequence": 1, "added_int_col_with_bigint_sequence": 1, "smallint_col_with_smallint_sequence": 1, "added_bigint_col_with_bigint_sequence": 1, "added_altered_int_col_with_int_sequence": 1, "added_smallint_col_with_smallint_sequence": 1, "added_altered_bigint_col_with_int_sequence": 1, "added_altered_int_col_with_bigint_sequence": 1, "added_altered_bigint_col_with_bigint_sequence": 1, "added_altered_smallint_col_with_smallint_sequence": 1} + {"id": 10, "int_col_with_int_sequence": 1, "bigint_col_with_int_sequence": 9288674231451649, "int_col_with_bigint_sequence": 1, "added_int_col_with_int_sequence": 1, "bigint_col_with_bigint_sequence": 9288674231451649, "added_bigint_col_with_int_sequence": 9288674231451649, "added_int_col_with_bigint_sequence": 1, "smallint_col_with_smallint_sequence": 1, "added_bigint_col_with_bigint_sequence": 9288674231451649, "added_altered_int_col_with_int_sequence": 1, "added_smallint_col_with_smallint_sequence": 1, "added_altered_bigint_col_with_int_sequence": 9288674231451649, "added_altered_int_col_with_bigint_sequence": 1, "added_altered_bigint_col_with_bigint_sequence": 9288674231451649, "added_altered_smallint_col_with_smallint_sequence": 1} + {"id": 10, "int_col_with_int_sequence": 1, "bigint_col_with_int_sequence": 13229323905400833, "int_col_with_bigint_sequence": 1, "added_int_col_with_int_sequence": 1, "bigint_col_with_bigint_sequence": 13229323905400833, "added_bigint_col_with_int_sequence": 13229323905400833, "added_int_col_with_bigint_sequence": 1, "smallint_col_with_smallint_sequence": 1, "added_bigint_col_with_bigint_sequence": 13229323905400833, "added_altered_int_col_with_int_sequence": 1, "added_smallint_col_with_smallint_sequence": 1, "added_altered_bigint_col_with_int_sequence": 13229323905400833, "added_altered_int_col_with_bigint_sequence": 1, "added_altered_bigint_col_with_bigint_sequence": 13229323905400833, "added_altered_smallint_col_with_smallint_sequence": 1} +(3 rows) + +-- will fail on workers but should still succeed on the coordinator +SELECT result FROM run_command_on_all_nodes( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (11, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$, + parallel => false +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + {"id": 11, "int_col_with_int_sequence": 1, "bigint_col_with_int_sequence": 2, "int_col_with_bigint_sequence": 1, "added_int_col_with_int_sequence": 1, "bigint_col_with_bigint_sequence": 5100, "added_bigint_col_with_int_sequence": 2, "added_int_col_with_bigint_sequence": 1, "smallint_col_with_smallint_sequence": 1, "added_bigint_col_with_bigint_sequence": 2, 
"added_altered_int_col_with_int_sequence": 1, "added_smallint_col_with_smallint_sequence": 1, "added_altered_bigint_col_with_int_sequence": 2, "added_altered_int_col_with_bigint_sequence": 1, "added_altered_bigint_col_with_bigint_sequence": 2, "added_altered_smallint_col_with_smallint_sequence": 1} + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(3 rows) + +-- all fail on workers, specifically test int / smallint columns added later +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(2 rows) + +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(2 rows) + +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (1, 1, 1, 1, 1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1, 1)$$, parallel => false); + result +--------------------------------------------------------------------- + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(2 rows) + +SELECT * FROM dist_schema_seq_test_without_initial_data.nextval_test ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16; + id | bigint_col_with_bigint_sequence | bigint_col_with_int_sequence | int_col_with_bigint_sequence | int_col_with_int_sequence | smallint_col_with_smallint_sequence | added_bigint_col_with_bigint_sequence | added_bigint_col_with_int_sequence | added_int_col_with_bigint_sequence | added_int_col_with_int_sequence | added_smallint_col_with_smallint_sequence | added_altered_bigint_col_with_bigint_sequence | added_altered_bigint_col_with_int_sequence | added_altered_int_col_with_bigint_sequence | added_altered_int_col_with_int_sequence | added_altered_smallint_col_with_smallint_sequence +--------------------------------------------------------------------- + 10 | 5000 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 + 10 | 9288674231451649 | 9288674231451649 | 1 | 1 | 1 | 9288674231451649 | 9288674231451649 | 1 | 1 | 1 | 9288674231451649 | 9288674231451649 | 1 | 1 | 1 + 10 | 13229323905400833 | 13229323905400833 | 1 | 1 | 1 | 13229323905400833 | 13229323905400833 | 1 | 1 | 1 | 13229323905400833 | 13229323905400833 | 1 | 1 | 1 + 11 | 5100 | 2 | 1 | 1 | 1 | 2 | 2 | 1 | 1 | 1 | 2 | 2 | 1 | 1 | 1 +(4 rows) + +-- create a table with built-in sequences under dist_schema_seq_test_without_initial_data +CREATE TABLE 
dist_schema_seq_test_without_initial_data.built_in_seq_test ( + id int, + smallserial_col smallserial, + serial_col serial, + bigserial_col bigserial, + generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY, + generated_int_col int GENERATED BY DEFAULT AS IDENTITY, + generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 178 INCREMENT BY 17 MINVALUE 100 MAXVALUE 1500600) +); +-- check built-in sequences +SELECT result FROM run_command_on_all_nodes( +$$ +WITH sequence_info AS ( + SELECT oid::regclass::text AS name, (get_sequence_info(oid)).* + FROM ( + SELECT objid + FROM pg_depend d + JOIN pg_class c ON c.oid = d.objid + WHERE d.refobjid = 'dist_schema_seq_test_without_initial_data.built_in_seq_test'::regclass AND + c.relkind = 'S' + ) sequence(oid) +) +SELECT jsonb_agg( + jsonb_build_object( + 'name', name, + 'type_name', type_name, + 'start_value', start_value, + 'last_value', last_value, + 'min_value', min_value, + 'max_value', max_value + ) + ORDER BY name +) +FROM sequence_info; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + [{"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_bigserial_col_seq", "max_value": 9223372036854775807, "min_value": 1, "type_name": "bigint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_generated_bigint_col_seq", "max_value": 1500600, "min_value": 100, "type_name": "bigint", "last_value": 178, "start_value": 178}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_generated_int_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_generated_smallint_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_serial_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 1, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_smallserial_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 1, "start_value": 1}] + [{"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_bigserial_col_seq", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_generated_bigint_col_seq", "max_value": 9570149208162305, "min_value": 9288674231451649, "type_name": "bigint", "last_value": 9288674231451649, "start_value": 9288674231451649}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_generated_int_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_generated_smallint_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_serial_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_smallserial_col_seq", "max_value": 32767, "min_value": 
1, "type_name": "smallint", "last_value": 32767, "start_value": 1}] + [{"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_bigserial_col_seq", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_generated_bigint_col_seq", "max_value": 13510798882111489, "min_value": 13229323905400833, "type_name": "bigint", "last_value": 13229323905400833, "start_value": 13229323905400833}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_generated_int_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_generated_smallint_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_serial_col_seq", "max_value": 2147483647, "min_value": 1, "type_name": "integer", "last_value": 2147483647, "start_value": 1}, {"name": "dist_schema_seq_test_without_initial_data.built_in_seq_test_smallserial_col_seq", "max_value": 32767, "min_value": 1, "type_name": "smallint", "last_value": 32767, "start_value": 1}] +(3 rows) + +-- check nextval calls used in table definition +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('dist_schema_seq_test_without_initial_data.built_in_seq_test') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + CREATE TABLE dist_schema_seq_test_without_initial_data.built_in_seq_test (id integer, smallserial_col smallint DEFAULT nextval('dist_schema_seq_test_without_initial_data.built_in_seq_test_smallserial_col_seq'::regclass) NOT NULL, serial_col integer DEFAULT nextval('dist_schema_seq_test_without_initial_data.built_in_seq_test_serial_col_seq'::regclass) NOT NULL, bigserial_col bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.built_in_seq_test_bigserial_col_seq'::regclass) NOT NULL, generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 32767 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_int_col integer GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 17 MINVALUE 100 MAXVALUE 1500600 START WITH 178 CACHE 1 NO CYCLE) NOT NULL) USING heap; ALTER TABLE dist_schema_seq_test_without_initial_data.built_in_seq_test OWNER TO postgres + CREATE TABLE dist_schema_seq_test_without_initial_data.built_in_seq_test (id integer, smallserial_col smallint DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.built_in_seq_test_smallserial_col_seq'::regclass) NOT NULL, serial_col integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.built_in_seq_test_serial_col_seq'::regclass) NOT NULL, bigserial_col bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.built_in_seq_test_bigserial_col_seq'::regclass) NOT NULL, generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 32767 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_int_col integer GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 
MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 17 MINVALUE 9288674231451649 MAXVALUE 9570149208162305 START WITH 9288674231451649 CACHE 1 NO CYCLE) NOT NULL) USING heap; ALTER TABLE dist_schema_seq_test_without_initial_data.built_in_seq_test OWNER TO postgres + CREATE TABLE dist_schema_seq_test_without_initial_data.built_in_seq_test (id integer, smallserial_col smallint DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.built_in_seq_test_smallserial_col_seq'::regclass) NOT NULL, serial_col integer DEFAULT worker_nextval('dist_schema_seq_test_without_initial_data.built_in_seq_test_serial_col_seq'::regclass) NOT NULL, bigserial_col bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.built_in_seq_test_bigserial_col_seq'::regclass) NOT NULL, generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 32767 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_int_col integer GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE) NOT NULL, generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY (INCREMENT BY 17 MINVALUE 13229323905400833 MAXVALUE 13510798882111489 START WITH 13229323905400833 CACHE 1 NO CYCLE) NOT NULL) USING heap; ALTER TABLE dist_schema_seq_test_without_initial_data.built_in_seq_test OWNER TO postgres +(3 rows) + +-- Should succeed on all nodes as we don't try inserting column default values +-- for the columns that are using int / smallint based sequences. +-- Doing so is okay from the coordinator but would cause an error on workers. +SELECT result FROM run_command_on_all_nodes( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_without_initial_data.built_in_seq_test VALUES (10, 1, 1, DEFAULT, 1, 1, DEFAULT) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$, + parallel => false +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + {"id": 10, "serial_col": 1, "bigserial_col": 1, "smallserial_col": 1, "generated_int_col": 1, "generated_bigint_col": 178, "generated_smallint_col": 1} + {"id": 10, "serial_col": 1, "bigserial_col": 9288674231451649, "smallserial_col": 1, "generated_int_col": 1, "generated_bigint_col": 9288674231451649, "generated_smallint_col": 1} + {"id": 10, "serial_col": 1, "bigserial_col": 13229323905400833, "smallserial_col": 1, "generated_int_col": 1, "generated_bigint_col": 13229323905400833, "generated_smallint_col": 1} +(3 rows) + +-- will fail on workers but should still succeed on the coordinator +SELECT result FROM run_command_on_all_nodes( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_without_initial_data.built_in_seq_test VALUES (11, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$, + parallel => false +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + {"id": 11, "serial_col": 1, "bigserial_col": 2, "smallserial_col": 1, "generated_int_col": 1, "generated_bigint_col": 195, "generated_smallint_col": 1} + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint + ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint +(3 rows) + +SELECT * FROM 
dist_schema_seq_test_without_initial_data.built_in_seq_test ORDER BY 1, 2, 3, 4, 5, 6, 7; + id | smallserial_col | serial_col | bigserial_col | generated_smallint_col | generated_int_col | generated_bigint_col +--------------------------------------------------------------------- + 10 | 1 | 1 | 1 | 1 | 1 | 178 + 10 | 1 | 1 | 9288674231451649 | 1 | 1 | 9288674231451649 + 10 | 1 | 1 | 13229323905400833 | 1 | 1 | 13229323905400833 + 11 | 1 | 1 | 2 | 1 | 1 | 195 +(4 rows) + +-- intermediate cleanup +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA tenant_1, tenant_2, tenant_3, tenant_4, tenant_5, tenant_6, tenant_7, tenant_8, tenant_9, alter_table_add_column, initially_local_schema_seq_test_with_initial_data, dist_schema_seq_test_with_initial_data, initially_local_schema_seq_test_without_initial_data, dist_schema_seq_test_without_initial_data CASCADE; +DROP SCHEMA regular_schema, alter_table_add_column_other_schema, regular_schema_worker_1, regular_schema_worker_2, regular_schema_worker_3, regular_schema_worker_4, regular_schema_worker_5 CASCADE; +DROP FUNCTION create_citus_local_with_data(text), get_sequence_info(regclass); +DROP SEQUENCE dist_seq; +DROP ROLE tenant_9_owner; +ALTER EXTENSION citus DROP ACCESS METHOD fake_am; +SELECT result FROM run_command_on_all_nodes($Q$ + DROP FUNCTION fake_am_handler(internal) CASCADE; +$Q$); + result +--------------------------------------------------------------------- + DROP FUNCTION + DROP FUNCTION + DROP FUNCTION +(3 rows) + +DROP ROLE rls_test_user_1, rls_test_user_2; +-- do the work that's required to be done from the coordinator for the rest of the tests +ALTER ROLE test_non_super_user WITH LOGIN; +GRANT CREATE ON DATABASE regression TO test_non_super_user; +SET citus.next_shard_id TO 2094000; +SET client_min_messages TO WARNING; +-- Remove a node, create a reference table, and add the node back so we can test +-- implicit reference table replication when creating a distributed-schema +-- table. +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +CREATE SCHEMA regular_schema; +CREATE TABLE regular_schema.reference_table (id bigint PRIMARY KEY); +SELECT create_reference_table('regular_schema.reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO regular_schema.reference_table SELECT i FROM generate_series(1, 10) AS i; +GRANT USAGE ON SCHEMA regular_schema TO test_non_super_user; +GRANT SELECT ON regular_schema.reference_table TO test_non_super_user; +GRANT REFERENCES ON regular_schema.reference_table TO test_non_super_user; +GRANT DELETE ON regular_schema.reference_table TO test_non_super_user; +-- add it with the groupid used earlier in the test file +SELECT 1 FROM citus_add_node('localhost', :worker_2_port, groupid => 47); + ?column?
+--------------------------------------------------------------------- + 1 +(1 row) + +ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2094050; +\c - test_non_super_user - :worker_1_port +SET citus.shard_replication_factor TO 1; +SET citus.enable_schema_based_sharding TO ON; +SET client_min_messages TO NOTICE; +BEGIN; + CREATE SCHEMA tenant_10; + CREATE TABLE tenant_10.t1(id int REFERENCES regular_schema.reference_table(id)); + INSERT INTO tenant_10.t1 SELECT i FROM generate_series(1, 5) AS i; + DELETE FROM tenant_10.t1 WHERE id > 3; + DELETE FROM regular_schema.reference_table WHERE id > 3; +COMMIT; +\c - postgres - :master_port +-- do the work that's required to be done from the coordinator for the rest of the tests +SET citus.next_shard_id TO 2094100; +SET client_min_messages TO WARNING; +DROP SCHEMA tenant_10 CASCADE; +DROP TABLE regular_schema.reference_table; +-- Remove a node, create a reference table, and add the node back so we can test +-- implicit reference table replication when creating a distributed-schema +-- table. +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +CREATE TABLE regular_schema.reference_table (id bigint PRIMARY KEY); +SELECT create_reference_table('regular_schema.reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO regular_schema.reference_table SELECT i FROM generate_series(1, 10) AS i; +GRANT USAGE ON SCHEMA regular_schema TO test_non_super_user; +GRANT SELECT ON regular_schema.reference_table TO test_non_super_user; +GRANT REFERENCES ON regular_schema.reference_table TO test_non_super_user; +GRANT DELETE ON regular_schema.reference_table TO test_non_super_user; +-- add it with the groupid used earlier in the test file +SELECT 1 FROM citus_add_node('localhost', :worker_2_port, groupid => 47); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2094150; +\c - test_non_super_user - :worker_1_port +SET citus.shard_replication_factor TO 1; +SET citus.enable_schema_based_sharding TO ON; +SET client_min_messages TO NOTICE; +-- This time first create an empty schema to force placing the shard placements +-- of the tables under tenant_12 to the other worker node, differently from the +-- one chosen for the shard placements under tenant_10. +BEGIN; + CREATE SCHEMA tenant_11; + CREATE SCHEMA tenant_12; + CREATE TABLE tenant_12.t1(id int REFERENCES regular_schema.reference_table(id)); + INSERT INTO tenant_12.t1 SELECT i FROM generate_series(1, 5) AS i; + DELETE FROM tenant_12.t1 WHERE id > 3; + DELETE FROM regular_schema.reference_table WHERE id > 3; +COMMIT; +-- Ensure a non-super user can alter / drop distributed-schema tables and +-- create / drop distributed-schemas too.
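+-- (The odd table and schema names used below contain characters that require +-- quoting, so these commands also exercise identifier quoting for such DDL.)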
+ALTER TABLE tenant_12.t1 RENAME TO "\!@#?t1_renamed"; +ALTER SCHEMA tenant_12 RENAME TO "\!@#?tenant_12_renamed"; +SET search_path TO "\!@#?tenant_12_renamed"; +DROP TABLE "\!@#?t1_renamed"; +CREATE TABLE "\!@#?t3"(id int); +ALTER TABLE "\!@#?t3" RENAME TO "t3"; +RESET search_path; +ALTER SCHEMA "\!@#?tenant_12_renamed" RENAME TO tenant_12; +CREATE TABLE tenant_12.t2(a int); +ALTER TABLE tenant_12.t2 RENAME TO t2_renamed; +DROP SCHEMA tenant_11; +CREATE SCHEMA tenant_13; +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT jsonb_agg( + jsonb_build_object ( + 'nspname', nspname, + 'relnames', relnames + ) + ORDER BY nspname +) +FROM ( + SELECT nspname, + array_agg(relname ORDER BY relname) AS relnames + FROM pg_namespace + LEFT JOIN pg_class ON (pg_namespace.oid = pg_class.relnamespace) + WHERE nspname IN ('tenant_11', 'tenant_12', 'tenant_13') AND + (pg_class.oid is NULL OR NOT relation_is_a_known_shard(pg_class.oid)) + GROUP BY pg_namespace.oid +) q; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + [{"nspname": "tenant_12", "relnames": ["t2_renamed", "t3"]}, {"nspname": "tenant_13", "relnames": [null]}] + [{"nspname": "tenant_12", "relnames": ["t2_renamed", "t3"]}, {"nspname": "tenant_13", "relnames": [null]}] + [{"nspname": "tenant_12", "relnames": ["t2_renamed", "t3"]}, {"nspname": "tenant_13", "relnames": [null]}] +(3 rows) + +\c - postgres - :master_port +GRANT ALL ON SCHEMA regular_schema TO test_non_super_user; +\c - test_non_super_user - :master_port +SET citus.next_shard_id TO 2094200; +SET client_min_messages TO WARNING; +CREATE TABLE regular_schema.fkey_test_reference_table(a int PRIMARY KEY); +SELECT create_reference_table('regular_schema.fkey_test_reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +-- gets automatically converted to a citus local table +CREATE TABLE regular_schema.fkey_test_citus_local_1(a int PRIMARY KEY REFERENCES regular_schema.fkey_test_reference_table(a)); +ALTER TABLE regular_schema.fkey_test_reference_table ADD CONSTRAINT fkey_to_drop FOREIGN KEY (a) REFERENCES regular_schema.fkey_test_citus_local_1(a); +\c - postgres - :master_port +ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2094250; +\c - test_non_super_user - :worker_1_port +SET citus.shard_replication_factor TO 1; +SET citus.enable_schema_based_sharding TO ON; +SET client_min_messages TO NOTICE; +CREATE SCHEMA fkey_test_tenant; +CREATE TABLE fkey_test_tenant.t1(id int PRIMARY KEY REFERENCES regular_schema.fkey_test_reference_table(a)); +CREATE TABLE fkey_test_tenant.t2(id int PRIMARY KEY REFERENCES fkey_test_tenant.t1(id), + other int REFERENCES regular_schema.fkey_test_reference_table(a)); +-- errors due to the foreign key "from" the reference table +ALTER TABLE regular_schema.fkey_test_citus_local_1 SET SCHEMA fkey_test_tenant; +ERROR: cannot create foreign keys to tables in a distributed schema from another schema +DETAIL: "regular_schema.fkey_test_reference_table" references "fkey_test_tenant.fkey_test_citus_local_1" via foreign key constraint "fkey_to_drop" +\c - test_non_super_user - :master_port +SET citus.next_shard_id TO 2094300; +SET client_min_messages TO WARNING; +ALTER TABLE regular_schema.fkey_test_reference_table DROP CONSTRAINT fkey_to_drop; +\c - postgres - :master_port +ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2094350; +\c - test_non_super_user - :worker_1_port +SET citus.shard_replication_factor 
TO 1; +SET citus.enable_schema_based_sharding TO ON; +SET client_min_messages TO NOTICE; +-- succeeds +ALTER TABLE regular_schema.fkey_test_citus_local_1 SET SCHEMA fkey_test_tenant; +NOTICE: Moving fkey_test_citus_local_1 into distributed schema fkey_test_tenant +CREATE TABLE regular_schema.fkey_test_local_1(a int PRIMARY KEY REFERENCES fkey_test_tenant.t1(id)); +ALTER TABLE regular_schema.fkey_test_local_1 SET SCHEMA fkey_test_tenant; +NOTICE: Moving fkey_test_local_1 into distributed schema fkey_test_tenant +SELECT result FROM run_command_on_all_nodes( +$$ +WITH tables AS ( + SELECT unnest( + ARRAY[ + 'regular_schema.fkey_test_reference_table'::regclass, + 'fkey_test_tenant.fkey_test_citus_local_1'::regclass, + 'fkey_test_tenant.fkey_test_local_1'::regclass, + 'fkey_test_tenant.t1'::regclass, + 'fkey_test_tenant.t2'::regclass + ] + ) AS oid +) +SELECT array_agg(conname ORDER BY conname) FROM pg_constraint +WHERE contype = 'f' AND + ( + conrelid IN (SELECT oid FROM tables) OR + confrelid IN (SELECT oid FROM tables) + ) +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + result +--------------------------------------------------------------------- + {fkey_test_citus_local_1_a_fkey,fkey_test_local_1_a_fkey,t1_id_fkey,t2_id_fkey,t2_other_fkey} + {fkey_test_citus_local_1_a_fkey,fkey_test_local_1_a_fkey,t1_id_fkey,t2_id_fkey,t2_other_fkey} + {fkey_test_citus_local_1_a_fkey,fkey_test_local_1_a_fkey,t1_id_fkey,t2_id_fkey,t2_other_fkey} +(3 rows) + +-- check referencing fkey constraints on placements +SELECT result FROM run_command_on_placements('regular_schema.fkey_test_reference_table', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$); + result +--------------------------------------------------------------------- + + + +(3 rows) + +SELECT result FROM run_command_on_placements('fkey_test_tenant.fkey_test_citus_local_1', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$); + result +--------------------------------------------------------------------- + {fkey_test_citus_local_1_a_fkey_2094201} +(1 row) + +SELECT result FROM run_command_on_placements('fkey_test_tenant.fkey_test_local_1', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$); + result +--------------------------------------------------------------------- + {fkey_test_local_1_a_fkey_2094350} +(1 row) + +SELECT result FROM run_command_on_placements('fkey_test_tenant.t1', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$); + result +--------------------------------------------------------------------- + {t1_id_fkey_2094250} +(1 row) + +SELECT result FROM run_command_on_placements('fkey_test_tenant.t2', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$); + result +--------------------------------------------------------------------- + {t2_id_fkey_2094251,t2_other_fkey_2094251} +(1 row) + +-- cleanup +\c - postgres - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA regular_schema, tenant_12, tenant_13, fkey_test_tenant CASCADE; +REVOKE ALL ON DATABASE regression FROM test_non_super_user; +DROP USER test_non_super_user; +-- reset pg_dist_shardid_seq on the coordinator +DO $proc$ +DECLARE + v_last_value bigint; +BEGIN + SELECT last_value INTO v_last_value FROM
pg_dist_shardid_seq_prev_state; + EXECUTE format('ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH %s', v_last_value); +END$proc$; +DROP TABLE pg_dist_shardid_seq_prev_state; diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index c73b043f60e..4702d49a2b2 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -81,12 +81,14 @@ ORDER BY 1; function citus_get_node_clock() function citus_get_transaction_clock() function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) + function citus_internal.acquire_placement_colocation_lock(bigint,integer) function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) function citus_internal.add_tenant_schema(oid,integer) + function citus_internal.adjust_identity_column_seq_settings(regclass,bigint,boolean) function citus_internal.adjust_local_clock_to_remote(cluster_clock) function citus_internal.citus_internal_copy_single_shard_placement(bigint,integer,integer,integer,citus.shard_transfer_mode) function citus_internal.database_command(text) @@ -96,9 +98,11 @@ ORDER BY 1; function citus_internal.delete_shard_metadata(bigint) function citus_internal.delete_tenant_schema(oid) function citus_internal.find_groupid_for_node(text,integer) + function citus_internal.get_next_colocation_id() function citus_internal.global_blocked_processes() function citus_internal.is_replication_origin_tracking_active() function citus_internal.local_blocked_processes() + function citus_internal.lock_colocation_id(integer,integer) function citus_internal.mark_node_not_synced(integer,integer) function citus_internal.pg_dist_node_trigger_func() function citus_internal.pg_dist_rebalance_strategy_trigger_func() @@ -313,6 +317,7 @@ ORDER BY 1; function worker_apply_inter_shard_ddl_command(bigint,text,bigint,text,text) function worker_apply_sequence_command(text) function worker_apply_sequence_command(text,regtype) + function worker_apply_sequence_command(text,regtype,bigint,boolean) function worker_apply_shard_ddl_command(bigint,text) function worker_apply_shard_ddl_command(bigint,text,text) function worker_binary_partial_agg(oid,anyelement) @@ -408,6 +413,6 @@ ORDER BY 1; view citus_tables view pg_dist_shard_placement view time_partitions -(376 rows) +(381 rows) DROP TABLE extension_basic_types; diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index 1b0f1427a42..cf99f17932b 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -7,8 +7,8 @@ test: isolation_add_node_vs_reference_table_operations test: isolation_create_table_vs_add_remove_node test: isolation_master_update_node test: isolation_shouldhaveshards - test: isolation_extension_commands +test: isolation_schema_based_sharding_from_any_node # tests that change node metadata should precede # isolation_cluster_management such that tests diff --git a/src/test/regress/mitmscripts/README.md b/src/test/regress/mitmscripts/README.md index 884f10b0658..db723ae1769 100644 --- a/src/test/regress/mitmscripts/README.md +++ 
b/src/test/regress/mitmscripts/README.md @@ -18,9 +18,10 @@ Automated Failure Testing works by inserting a network proxy (mitmproxy) between ## Getting Started First off, to use this you'll need mitmproxy. -Currently, we rely on a [fork](https://github.com/thanodnl/mitmproxy/tree/fix/tcp-flow-kill) to run the failure tests. -We recommned using pipenv to setup your failure testing environment since that will handle installing the fork -and other dependencies which may be updated/changed. +The failure tests use the Citus mitmproxy fork pinned in the Pipfile and Pipfile.lock, with only the small +TCP-layer patch we still need on top of upstream 12.2.2. +We recommend using pipenv to set up your failure testing environment since that will install the tested +version and the rest of the dependencies together. Setting up pipenv is easy if you already have python and pip set up: ```bash diff --git a/src/test/regress/multi_1_create_citus_schedule b/src/test/regress/multi_1_create_citus_schedule index e94a5e54b73..048423f50fc 100644 --- a/src/test/regress/multi_1_create_citus_schedule +++ b/src/test/regress/multi_1_create_citus_schedule @@ -16,6 +16,7 @@ test: multi_extension test: multi_test_helpers multi_test_helpers_superuser test: multi_cluster_management +test: multi_create_fdw # --- # Prerequisites for multi_metadata_sync (from citus_tests/run_test.py) @@ -79,4 +80,34 @@ test: mx_regular_user # ---------- test: multi_multiuser +# Don't parallelize stat_counters with others because we don't want statistics +# to be updated by other tests concurrently except Citus Maintenance Daemon. +# +# Also, this needs to be the first test that calls citus_stat_counters() +# because it checks the value of stats_reset column before calling the function. +test: stat_counters + test: function_propagation +test: drop_database +test: pg16 +test: pg18 +test: multi_transaction_recovery_multiple_databases + +# This placeholder section at the end lists the tests that need to be moved back +# to their original schedules with the next major version of Citus. +# +# These tests are kept here to prevent N-1 test failures for the features that are +# not present in the minor version. +# --------------------------------------------------------------------------------- + +# ---------- +# ---------- +# multi_unsupported_worker_operations tests that unsupported operations error out on metadata workers +# ---------- +test: multi_unsupported_worker_operations + +# Don't parallelize the following two tests as they directly alter pg_dist_shardid_seq +# on the coordinator and restore it at the end. +# Will be moved back to multi_1_schedule at Citus 15. +test: schema_based_sharding_from_workers_a +test: schema_based_sharding_from_workers_b diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 538a732e3e6..04e39ca22e1 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -44,15 +44,7 @@ test: comment_on_role test: single_shard_table_udfs test: schema_based_sharding test: citus_schema_distribute_undistribute -# Don't parallelize stat_counters with others because we don't want statistics -# to be updated by other tests concurrently except Citus Maintenance Daemon. -# -# Also, this needs to be the first test that calls citus_stat_counters() -# because it checks the value of stats_reset column before calling the function.
-test: stat_counters - test: columnar_citus_integration - test: multi_test_catalog_views test: multi_alias test: grant_on_sequence_propagation @@ -212,7 +204,6 @@ test: multi_repartition_udt multi_repartitioned_subquery_udf multi_subtransactio test: multi_generate_ddl_commands test: multi_create_shards -test: multi_transaction_recovery_multiple_databases test: local_dist_join_modifications test: local_table_join @@ -245,13 +236,6 @@ test: multi_large_shardid # ---------- test: multi_size_queries -# ---------- -# ---------- -# multi_unsupported_worker_operations tests that unsupported operations error out on metadata workers -# ---------- -test: multi_unsupported_worker_operations - - # ---------- # multi_function_evaluation tests edge-cases in master-side function pre-evaluation # ---------- diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index c61488665a0..4bbabc95572 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -1,7 +1,6 @@ test: multi_test_helpers multi_test_helpers_superuser test: multi_cluster_management test: create_role_propagation -test: pg16 test: multi_create_fdw test: multi_test_catalog_views test: replicated_table_disable_node @@ -68,7 +67,6 @@ test: pg14 test: pg15 test: pg15_jsonpath detect_conn_close test: pg17 pg17_json -test: pg18 test: drop_column_partitioned_table test: tableam @@ -138,5 +136,4 @@ test: ensure_no_shared_connection_leak test: check_mx test: generated_identity -test: drop_database test: check_cluster_state diff --git a/src/test/regress/spec/isolation_schema_based_sharding_from_any_node.spec b/src/test/regress/spec/isolation_schema_based_sharding_from_any_node.spec new file mode 100644 index 00000000000..285e4019cf7 --- /dev/null +++ b/src/test/regress/spec/isolation_schema_based_sharding_from_any_node.spec @@ -0,0 +1,140 @@ +#include "isolation_mx_common.include.spec" + +setup +{ + SELECT citus_set_coordinator_host('localhost', 57636); + + SELECT 1 FROM citus_add_node('localhost', 57637); + SELECT 1 FROM citus_add_node('localhost', 57638); + + SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off;CREATE OR REPLACE FUNCTION override_backend_data_gpid(bigint) + RETURNS void + LANGUAGE C STRICT IMMUTABLE + AS 'citus'$$); + + + -- Consistently place distributed-schema tables in the cluster, + -- see EmptySingleShardTableColocationDecideNodeId(). 
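+    -- (Restarting the colocation id sequence from a fixed value keeps the node
+    -- chosen for each newly created distributed schema stable across test runs.)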
+ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10001; + + SET citus.shard_replication_factor TO 1; + + SET citus.enable_schema_based_sharding TO ON; + CREATE SCHEMA sc1; + RESET citus.enable_schema_based_sharding; +} + +teardown +{ + DROP SCHEMA IF EXISTS sc1, sc2 CASCADE; + DROP TABLE IF EXISTS ref; + SELECT COUNT(*)>=0 FROM ( + SELECT citus_remove_node(nodename, nodeport) + FROM pg_dist_node + WHERE nodename = 'localhost' AND nodeport IN (57637, 57638) + ) q; +} + +session "coord" + +step "coord-begin" { BEGIN; } +step "coord-commit" { COMMIT; } +step "coord-shard-replication-factor" { SET citus.shard_replication_factor TO 1; } +step "coord-create-table-ref" { CREATE TABLE ref (id int PRIMARY KEY); } +step "coord-create-reference-table-ref" { SELECT create_reference_table('ref'); } +step "coord-create-table-sc1-t1" { CREATE TABLE sc1.t1 (a int); } +step "coord-create-table-sc1-t2" { CREATE TABLE sc1.t2 (a int); } +step "coord-add-worker-57638" { SELECT 1 FROM citus_add_node('localhost', 57638); } +step "coord-remove-worker-57638" { SELECT 1 FROM citus_remove_node('localhost', 57638); } +step "coord-query-ref-placements" { SELECT nodeport, success, result FROM run_command_on_placements('ref', 'SELECT count(*) FROM %s') ORDER BY nodeport; } +step "coord-query-sc1-t1-placement" { SELECT nodeport, success, result FROM run_command_on_placements('sc1.t1', 'SELECT count(*) FROM %s') ORDER BY nodeport; } +step "coord-query-sc1-t2-placement" { SELECT nodeport, success, result FROM run_command_on_placements('sc1.t2', 'SELECT count(*) FROM %s') ORDER BY nodeport; } +step "coord-move-shard-sc1-t1" +{ + SELECT citus_move_shard_placement( + s.shardid, + src.nodename, src.nodeport, + dst.nodename, dst.nodeport, + 'block_writes' + ) + FROM pg_dist_shard s + JOIN pg_dist_shard_placement src USING (shardid) + CROSS JOIN ( + SELECT nodename, nodeport + FROM pg_dist_node + WHERE noderole = 'primary' AND isactive AND shouldhaveshards AND + (nodename, nodeport) NOT IN ( + SELECT p.nodename, p.nodeport + FROM pg_dist_shard_placement p + JOIN pg_dist_shard ps USING (shardid) + WHERE ps.logicalrelid = 'sc1.t1'::regclass + ) + ORDER BY nodeport + LIMIT 1 + ) dst + WHERE s.logicalrelid = 'sc1.t1'::regclass; +} +step "coord-show-tables-in-schema-sc1" +{ + SELECT nodeport, success, result + FROM run_command_on_all_nodes($$ + SELECT array_agg(tablename ORDER BY tablename) FROM pg_tables WHERE schemaname = 'sc1' AND tablename IN ('t1', 't2', 't1_renamed') + $$) + JOIN pg_dist_node USING (nodeid) + ORDER BY nodeport; +} + +session "worker-57637" + +step "worker-57637-start" { SELECT start_session_level_connection_to_node('localhost', 57637); } +step "worker-57637-stop" { SELECT stop_session_level_connection_to_node(); } +step "worker-57637-begin" { SELECT run_commands_on_session_level_connection_to_node('BEGIN'); } +step "worker-57637-commit" { SELECT run_commands_on_session_level_connection_to_node('COMMIT'); } +step "worker-57637-shard-replication-factor" { SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); } +step "worker-57637-create-table-sc1-t1" { SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t1 (a int)'); } +step "worker-57637-create-table-sc1-t2" { SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t2 (a int)'); } +step "worker-57637-drop-table-sc1-t1" { SELECT run_commands_on_session_level_connection_to_node('DROP TABLE sc1.t1'); } +step "worker-57637-alter-table-rename-sc1-t1" { SELECT 
run_commands_on_session_level_connection_to_node('ALTER TABLE sc1.t1 RENAME TO t1_renamed'); } + +session "worker-57638" + +step "worker-57638-start" { SELECT start_session_level_connection_to_node('localhost', 57638); } +step "worker-57638-stop" { SELECT stop_session_level_connection_to_node(); } +step "worker-57638-begin" { SELECT run_commands_on_session_level_connection_to_node('BEGIN'); } +step "worker-57638-commit" { SELECT run_commands_on_session_level_connection_to_node('COMMIT'); } +step "worker-57638-shard-replication-factor" { SELECT run_commands_on_session_level_connection_to_node('SET citus.shard_replication_factor TO 1'); } +step "worker-57638-create-table-sc1-t1" { SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t1 (a int)'); } +step "worker-57638-create-table-sc1-t2" { SELECT run_commands_on_session_level_connection_to_node('CREATE TABLE sc1.t2 (a int)'); } +step "worker-57638-drop-table-sc1-t1" { SELECT run_commands_on_session_level_connection_to_node('DROP TABLE sc1.t1'); } +step "worker-57638-alter-table-rename-sc1-t1" { SELECT run_commands_on_session_level_connection_to_node('ALTER TABLE sc1.t1 RENAME TO t1_renamed'); } + +// Create a distributed-schema table via worker-57637 that forces the reference tables to be replicated to all nodes while adding a new worker node via the coordinator. +// worker-57637-create-table-sc1-t2 won't replicate the reference table to the new node, so the first call to coord-query-ref-placements won't show a placement on the new node, but the second call will. +permutation "coord-create-table-ref" "coord-create-reference-table-ref" "coord-remove-worker-57638" "coord-begin" "coord-add-worker-57638" "worker-57637-start" "worker-57637-shard-replication-factor" "worker-57637-create-table-sc1-t2" "coord-commit" "coord-query-ref-placements" "coord-query-sc1-t2-placement" "worker-57637-shard-replication-factor" "worker-57637-create-table-sc1-t1" "worker-57637-stop" "coord-query-ref-placements" "coord-query-sc1-t1-placement" + +// create a distributed-schema table via worker-57637 while moving a shard of another table under the same schema via the coordinator +permutation "coord-shard-replication-factor" "coord-create-table-sc1-t1" "coord-query-sc1-t1-placement" "coord-begin" "coord-move-shard-sc1-t1" "worker-57637-start" "worker-57637-shard-replication-factor" "worker-57637-create-table-sc1-t2" "coord-commit" "worker-57637-stop" "coord-query-sc1-t1-placement" + +// create a distributed-schema table via worker-57637 while dropping the **only** other table under the same schema via worker-57638 +permutation "coord-shard-replication-factor" "coord-create-table-sc1-t1" "worker-57638-start" "worker-57638-begin" "worker-57638-drop-table-sc1-t1" "worker-57637-start" "worker-57637-shard-replication-factor" "worker-57637-create-table-sc1-t2" "worker-57638-commit" "worker-57638-stop" "worker-57637-stop" "coord-show-tables-in-schema-sc1" + +// move a shard via the coordinator while creating a distributed-schema table under the same schema via worker-57637 +permutation "coord-shard-replication-factor" "coord-create-table-sc1-t1" "coord-query-sc1-t1-placement" "worker-57637-start" "worker-57637-shard-replication-factor" "worker-57637-begin" "worker-57637-create-table-sc1-t2" "coord-move-shard-sc1-t1" "worker-57637-commit" "worker-57637-stop" "coord-query-sc1-t1-placement" + +// drop the **only** table of a distributed schema via worker-57638 while creating another table under the same schema via worker-57637
+permutation "coord-shard-replication-factor" "coord-create-table-sc1-t1" "worker-57637-start" "worker-57637-shard-replication-factor" "worker-57638-start" "worker-57637-begin" "worker-57637-create-table-sc1-t2" "worker-57638-drop-table-sc1-t1" "worker-57637-commit" "worker-57638-stop" "worker-57637-stop" "coord-show-tables-in-schema-sc1" + +// create a distributed-schema table via worker-57638 while creating another table under the same schema via worker-57637 +permutation "worker-57637-start" "worker-57637-shard-replication-factor" "worker-57637-begin" "worker-57637-create-table-sc1-t1" "worker-57638-start" "worker-57638-shard-replication-factor" "worker-57638-create-table-sc1-t2" "worker-57637-commit" "worker-57637-stop" "worker-57638-commit" "worker-57638-stop" "coord-show-tables-in-schema-sc1" + +// create a distributed-schema table via worker-57637 that causes ensuring that the reference tables are replicated to all nodes while doing the same the coordinator +permutation "coord-remove-worker-57638" "coord-create-table-ref" "coord-create-reference-table-ref" "coord-add-worker-57638" "worker-57637-start" "worker-57637-begin" "worker-57637-shard-replication-factor" "worker-57637-create-table-sc1-t1" "coord-begin" "coord-shard-replication-factor" "coord-create-table-sc1-t2" "worker-57637-commit" "worker-57637-stop" "coord-commit" "coord-query-ref-placements" "coord-query-sc1-t1-placement" "coord-query-sc1-t2-placement" + +// create the same distributed-schema table from different workers +permutation "worker-57637-start" "worker-57637-shard-replication-factor" "worker-57637-begin" "worker-57637-create-table-sc1-t1" "worker-57638-start" "worker-57638-shard-replication-factor" "worker-57638-create-table-sc1-t1" "worker-57637-commit" "worker-57637-stop" "worker-57638-stop" "coord-show-tables-in-schema-sc1" + +// drop the same distributed-schema table from different workers +permutation "coord-shard-replication-factor" "coord-create-table-sc1-t1" "worker-57637-start" "worker-57637-begin" "worker-57637-drop-table-sc1-t1" "worker-57638-start" "worker-57638-drop-table-sc1-t1" "worker-57637-commit" "worker-57637-stop" "worker-57638-stop" "coord-show-tables-in-schema-sc1" + +// rename the same distributed-schema table from different workers +permutation "coord-shard-replication-factor" "coord-create-table-sc1-t1" "worker-57637-start" "worker-57637-begin" "worker-57637-alter-table-rename-sc1-t1" "worker-57638-start" "worker-57638-alter-table-rename-sc1-t1" "worker-57637-commit" "worker-57637-stop" "worker-57638-stop" "coord-show-tables-in-schema-sc1" diff --git a/src/test/regress/sql/failure_mx_metadata_sync.sql b/src/test/regress/sql/failure_mx_metadata_sync.sql index d8f82296f18..898edf7e741 100644 --- a/src/test/regress/sql/failure_mx_metadata_sync.sql +++ b/src/test/regress/sql/failure_mx_metadata_sync.sql @@ -67,12 +67,6 @@ SELECT count(*) > 0 AS is_table_distributed FROM pg_dist_partition WHERE logicalrelid='t2'::regclass; --- Failure to set groupid in the worker -SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").cancel(' || :pid || ')'); -SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()'); -SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); - -- Failure to delete pg_dist_node entries from the worker SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").cancel(' || :pid || ')'); SELECT 
stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); diff --git a/src/test/regress/sql/issue_6592.sql b/src/test/regress/sql/issue_6592.sql index f48f88869b0..e387aa71855 100644 --- a/src/test/regress/sql/issue_6592.sql +++ b/src/test/regress/sql/issue_6592.sql @@ -1,4 +1,14 @@ -- https://github.com/citusdata/citus/issues/6592 + +-- first, make sure to remove the coordinator if it was already added +SET client_min_messages to ERROR; +SELECT COUNT(*)>=0 FROM ( + SELECT master_remove_node(nodename, nodeport) + FROM pg_dist_node + WHERE nodename = 'localhost' AND nodeport = :master_port +) q; +RESET client_min_messages; + SET citus.next_shard_id TO 180000; CREATE TABLE ref_table_to_be_dropped_6592 (key int); SELECT create_reference_table('ref_table_to_be_dropped_6592'); @@ -6,7 +16,7 @@ CREATE TABLE ref_table_oid AS SELECT oid FROM pg_class WHERE relname = 'ref_tabl SET citus.enable_ddl_propagation TO OFF; DROP TABLE ref_table_to_be_dropped_6592 CASCADE; -- citus_drop_all_shards doesn't drop shards and metadata --- ensure that coordinator is added to pg_dist_node +-- add the coordinator to pg_dist_node SET client_min_messages to ERROR; SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); RESET client_min_messages; @@ -37,3 +47,70 @@ DELETE FROM pg_dist_shard WHERE shardid = 180000; DELETE FROM pg_dist_partition WHERE logicalrelid IN (SELECT oid FROM ref_table_oid); DROP TABLE ref_table_oid; SELECT 1 FROM citus_remove_node('localhost', :master_port); + +-- test the same when creating a distributed-schema table from a worker + +SET citus.next_shard_id TO 180100; +CREATE TABLE other_ref_table_to_be_dropped (key int); +SELECT create_reference_table('other_ref_table_to_be_dropped'); +CREATE TABLE other_ref_table_oid AS SELECT oid FROM pg_class WHERE relname = 'other_ref_table_to_be_dropped'; + +SET citus.enable_ddl_propagation TO OFF; +DROP TABLE other_ref_table_to_be_dropped CASCADE; -- citus_drop_all_shards doesn't drop shards and metadata +RESET citus.enable_ddl_propagation; + +-- add the coordinator to pg_dist_node +SET client_min_messages to ERROR; +SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0); +RESET client_min_messages; + +-- As we always grab the next shard id from the coordinator, we need to alter +-- the sequence on the coordinator, so we first store its current value. Since the connection +-- that we internally use to get the next shard id from the coordinator might +-- change, we cannot just set citus.next_shard_id on the coordinator because +-- doing so wouldn't affect further connections to the coordinator.
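+-- The pattern used below therefore saves the sequence's current last_value into +-- a helper table and restores it from that table in the DO block at the end of +-- this file.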
+SELECT last_value::bigint INTO pg_dist_shardid_seq_prev_state FROM pg_catalog.pg_dist_shardid_seq; +ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 180150; + +\c - - - :worker_1_port + +SET citus.enable_schema_based_sharding TO ON; +CREATE SCHEMA s1; +RESET citus.enable_schema_based_sharding; + +-- errors out for the dropped reference table +SET citus.shard_replication_factor TO 1; +CREATE TABLE s1.t1 (a int); + +\c - - - :worker_1_port +SET citus.enable_ddl_propagation TO OFF; +DELETE FROM pg_dist_partition WHERE logicalrelid = 'other_ref_table_to_be_dropped'::regclass; +DELETE FROM pg_dist_placement WHERE shardid = 180100; +DELETE FROM pg_dist_shard WHERE shardid = 180100; +DROP TABLE IF EXISTS other_ref_table_to_be_dropped; +DROP TABLE IF EXISTS other_ref_table_to_be_dropped_180100; +\c - - - :worker_2_port +SET citus.enable_ddl_propagation TO OFF; +DELETE FROM pg_dist_partition WHERE logicalrelid = 'other_ref_table_to_be_dropped'::regclass; +DELETE FROM pg_dist_placement WHERE shardid = 180100; +DELETE FROM pg_dist_shard WHERE shardid = 180100; +DROP TABLE IF EXISTS other_ref_table_to_be_dropped; +DROP TABLE IF EXISTS other_ref_table_to_be_dropped_180100; +\c - - - :master_port +DELETE FROM pg_dist_placement WHERE shardid = 180100; +DELETE FROM pg_dist_shard WHERE shardid = 180100; +DELETE FROM pg_dist_partition WHERE logicalrelid IN (SELECT oid FROM other_ref_table_oid); +DROP TABLE other_ref_table_oid; +DROP SCHEMA s1; +SELECT 1 FROM citus_remove_node('localhost', :master_port); + +-- reset pg_dist_shardid_seq on the coordinator +DO $proc$ +DECLARE + v_last_value bigint; +BEGIN + SELECT last_value INTO v_last_value FROM pg_dist_shardid_seq_prev_state; + EXECUTE format('ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH %s', v_last_value); +END$proc$; + +DROP TABLE pg_dist_shardid_seq_prev_state; diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql index 71078adab24..4510b7f5a20 100644 --- a/src/test/regress/sql/multi_cluster_management.sql +++ b/src/test/regress/sql/multi_cluster_management.sql @@ -535,8 +535,13 @@ BEGIN; SET citus.enable_metadata_sync TO OFF; SELECT start_metadata_sync_to_all_nodes(); DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated; + + -- we're not interested in what we send to the nodes we're removing + SET LOCAL citus.log_remote_commands TO OFF; SELECT 1 FROM citus_remove_node('localhost', :worker_1_port); SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + SET LOCAL citus.log_remote_commands TO ON; + SELECT 1 FROM citus_add_node('localhost', :worker_1_port); SELECT 1 FROM citus_add_node('localhost', :worker_2_port); ROLLBACK; diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index ebc4a505ee1..548e3af0e9b 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -766,6 +766,16 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '14.0-1'; SELECT * FROM multi_extension.print_extension_changes(); +-- Test downgrade to 14.0-1 from 14.1-1 +ALTER EXTENSION citus UPDATE TO '14.1-1'; +ALTER EXTENSION citus UPDATE TO '14.0-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + +-- Snapshot of state at 14.1-1 +ALTER EXTENSION citus UPDATE TO '14.1-1'; +SELECT * FROM multi_extension.print_extension_changes(); + DROP TABLE multi_extension.prev_objects, 
multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql index a002a519c26..f373a426f77 100644 --- a/src/test/regress/sql/multi_metadata_sync.sql +++ b/src/test/regress/sql/multi_metadata_sync.sql @@ -838,12 +838,7 @@ DROP TABLE mx_testing_schema.mx_test_table; DROP TABLE mx_ref; DROP TABLE dist_table_1, dist_table_2; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO off; -- for enterprise CREATE USER non_super_metadata_user; -SET citus.enable_ddl_propagation TO on; -RESET client_min_messages; -SELECT run_command_on_workers('CREATE USER non_super_metadata_user'); GRANT EXECUTE ON FUNCTION start_metadata_sync_to_node(text,int) TO non_super_metadata_user; GRANT EXECUTE ON FUNCTION stop_metadata_sync_to_node(text,int,bool) TO non_super_metadata_user; GRANT ALL ON pg_dist_node TO non_super_metadata_user; diff --git a/src/test/regress/sql/multi_mx_metadata.sql b/src/test/regress/sql/multi_mx_metadata.sql index 5ac5176c47e..432aacdf141 100644 --- a/src/test/regress/sql/multi_mx_metadata.sql +++ b/src/test/regress/sql/multi_mx_metadata.sql @@ -150,7 +150,7 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schema -- shard also does not exist since we create shards in a transaction SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and schemaname = 'citus_mx_schema_for_xacts'; --- make sure that citus_drop_all_shards does not work from the worker nodes +-- make sure that citus_drop_all_shards does not work from the worker nodes when the coordinator is not in the metadata SELECT citus_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts'); -- Ensure pg_dist_transaction is empty for test @@ -245,3 +245,11 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx \c - postgres - :master_port ALTER SYSTEM RESET citus.recover_2pc_interval; SELECT pg_reload_conf(); + +SET client_min_messages TO WARNING; +DROP SCHEMA citus_mx_schema_for_xacts CASCADE; +DROP TABLE distributed_mx_table CASCADE; +DROP USER no_access_mx; + +\c - postgres - :worker_1_port +DROP TABLE should_commit CASCADE; diff --git a/src/test/regress/sql/multi_mx_schema_support.sql b/src/test/regress/sql/multi_mx_schema_support.sql index 7f1e5d0de77..096687b18f4 100644 --- a/src/test/regress/sql/multi_mx_schema_support.sql +++ b/src/test/regress/sql/multi_mx_schema_support.sql @@ -321,6 +321,8 @@ SELECT table_schema AS "Shards' Schema" GROUP BY table_schema; -- Show that altering distributed schema is not allowed on worker nodes +-- when the coordinator is not in the metadata. 
+SELECT COUNT(*)=0 FROM pg_dist_node WHERE groupid = 0; -- verify that the coordinator is not in the metadata ALTER SCHEMA mx_old_schema RENAME TO temp_mx_old_schema; \c - - - :master_port diff --git a/src/test/regress/sql/multi_test_catalog_views.sql b/src/test/regress/sql/multi_test_catalog_views.sql index 249b2d274b5..96ca28ce47b 100644 --- a/src/test/regress/sql/multi_test_catalog_views.sql +++ b/src/test/regress/sql/multi_test_catalog_views.sql @@ -1,20 +1,14 @@ --- create a temporary custom version of this function that's normally defined --- in multi_test_helpers, so that this file can be run parallel with --- multi_test_helpers during the minimal schedules -CREATE OR REPLACE FUNCTION run_command_on_master_and_workers_temp(p_sql text) -RETURNS void LANGUAGE plpgsql AS $$ -BEGIN - EXECUTE p_sql; - PERFORM run_command_on_workers(p_sql); -END;$$; - -- The following views are intended as alternatives to \d commands, whose -- output changed in PostgreSQL 10. In particular, they must be used any time -- a test wishes to print out the structure of a relation, which previously -- was safely accomplished by a \d invocation. -SELECT run_command_on_master_and_workers_temp( -$desc_views$ -CREATE VIEW table_fkey_cols AS +-- +-- As we propagate CREATE VIEW commands when the view doesn't depend on an +-- un-distributable dependency, all the views below are implicitly propagated as +-- they only depend on catalog objects, which are created by initdb for each +-- node separately and so are not assumed to be un-distributable by +-- GetUndistributableDependency(). +CREATE OR REPLACE VIEW table_fkey_cols AS SELECT rc.constraint_name AS "name", kcu.column_name AS "column_name", uc_kcu.column_name AS "refd_column_name", @@ -29,7 +23,7 @@ WHERE rc.constraint_schema = kcu.constraint_schema AND rc.unique_constraint_schema = uc_kcu.constraint_schema AND rc.unique_constraint_name = uc_kcu.constraint_name; -CREATE VIEW table_fkeys AS +CREATE OR REPLACE VIEW table_fkeys AS SELECT name AS "Constraint", format('FOREIGN KEY (%s) REFERENCES %s(%s)', string_agg(DISTINCT quote_ident(column_name), ', '), @@ -39,7 +33,7 @@ SELECT name AS "Constraint", FROM table_fkey_cols GROUP BY (name, relid); -CREATE VIEW table_attrs AS +CREATE OR REPLACE VIEW table_attrs AS SELECT c.column_name AS "name", c.data_type AS "type", CASE @@ -55,7 +49,7 @@ SELECT c.column_name AS "name", FROM information_schema.columns AS c ORDER BY ordinal_position; -CREATE VIEW table_desc AS +CREATE OR REPLACE VIEW table_desc AS SELECT "name" AS "Column", "type" || "modifier" AS "Type", rtrim(( @@ -85,7 +79,7 @@ WHERE c.contype <> 'n' -- drop NOT NULL AND c.conrelid <> 0 -- table-level (exclude domain checks) ORDER BY "Constraint", "Definition"; -CREATE VIEW index_attrs AS +CREATE OR REPLACE VIEW index_attrs AS WITH indexoid AS ( SELECT c.oid, n.nspname, @@ -108,8 +102,3 @@ WHERE true AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attrelid, a.attnum; - -$desc_views$ -); - -DROP FUNCTION run_command_on_master_and_workers_temp(p_sql text); diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql index f6829ead0d3..2881a087033 100644 --- a/src/test/regress/sql/multi_test_helpers.sql +++ b/src/test/regress/sql/multi_test_helpers.sql @@ -206,9 +206,9 @@ RETURNS jsonb AS $func$ EXECUTE format( $$ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.constraint_names) AS fkeys_with_different_config FROM ( - SELECT array_agg(constraint_name ORDER BY constraint_oid) AS constraint_names, -
array_agg(referencing_table::regclass::text ORDER BY constraint_oid) AS referencing_tables, - array_agg(referenced_table::regclass::text ORDER BY constraint_oid) AS referenced_tables, + SELECT array_agg(constraint_name ORDER BY constraint_name) AS constraint_names, + array_agg(referencing_table::regclass::text ORDER BY constraint_name) AS referencing_tables, + array_agg(referenced_table::regclass::text ORDER BY constraint_name) AS referenced_tables, referencing_columns, referenced_columns, deferable, deferred, on_update, on_delete, match_type, referencing_columns_set_null_or_default FROM ( SELECT @@ -254,8 +254,8 @@ RETURNS jsonb AS $func$ EXECUTE format( $$ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.indexnames) AS index_defs FROM ( - SELECT array_agg(indexname ORDER BY indexrelid) AS indexnames, - array_agg(indexdef ORDER BY indexrelid) AS indexdefs + SELECT array_agg(indexname ORDER BY indexrelid::regclass::text) AS indexnames, + array_agg(indexdef ORDER BY indexrelid::regclass::text) AS indexdefs FROM pg_indexes JOIN pg_index ON (indexrelid = (schemaname || '.' || indexname)::regclass) @@ -282,7 +282,7 @@ RETURNS jsonb AS $func$ SELECT column_name, column_default::text, generation_expression::text FROM information_schema.columns WHERE table_schema = '%1$s' AND table_name = '%2$s' AND - column_default IS NOT NULL OR generation_expression IS NOT NULL + (column_default IS NOT NULL OR generation_expression IS NOT NULL) ) q1 $$, schemaname, tablename) INTO result; @@ -299,7 +299,7 @@ RETURNS jsonb AS $func$ $$ SELECT to_jsonb(q2.*) FROM ( SELECT relnames, jsonb_agg(to_jsonb(q1.*) - 'relnames' ORDER BY q1.column_name) AS column_attrs FROM ( - SELECT array_agg(attrelid::regclass::text ORDER BY attrelid) AS relnames, + SELECT array_agg(attrelid::regclass::text ORDER BY attrelid::regclass::text) AS relnames, attname AS column_name, typname AS type_name, collname AS collation_name, attcompression AS compression_method, attnotnull AS not_null FROM pg_attribute pa LEFT JOIN pg_type pt ON (pa.atttypid = pt.oid) @@ -794,4 +794,3 @@ BEGIN END LOOP; END; $$; - diff --git a/src/test/regress/sql/multi_unsupported_worker_operations.sql b/src/test/regress/sql/multi_unsupported_worker_operations.sql index 324b8772b76..4ca1e50e139 100644 --- a/src/test/regress/sql/multi_unsupported_worker_operations.sql +++ b/src/test/regress/sql/multi_unsupported_worker_operations.sql @@ -79,8 +79,10 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_tabl \d mx_test_index -- citus_drop_all_shards -SELECT citus_drop_all_shards('mx_table'::regclass, 'public', 'mx_table'); -SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass; +BEGIN; + SELECT citus_drop_all_shards('mx_table'::regclass, 'public', 'mx_table'); + SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass; +ROLLBACK; -- master_add_inactive_node @@ -142,9 +144,11 @@ DROP TABLE mx_table; SELECT count(*) FROM mx_table; -- master_drop_distributed_table_metadata -SELECT master_remove_distributed_table_metadata_from_workers('mx_table'::regclass, 'public', 'mx_table'); -SELECT master_remove_partition_metadata('mx_table'::regclass, 'public', 'mx_table'); -SELECT count(*) FROM mx_table; +BEGIN; + SELECT master_remove_distributed_table_metadata_from_workers('mx_table'::regclass, 'public', 'mx_table'); + SELECT master_remove_partition_metadata('mx_table'::regclass, 'public', 'mx_table'); + SELECT count(*) FROM mx_table; +ROLLBACK; -- 
citus_copy_shard_placement SELECT logicalrelid, shardid AS testshardid, nodename, nodeport @@ -188,3 +192,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; SELECT start_metadata_sync_to_node('localhost', :worker_2_port); RESET citus.shard_replication_factor; + +-- start metadata sync to node again to make the test re-runnable +SELECT start_metadata_sync_to_node('localhost', :worker_2_port); diff --git a/src/test/regress/sql/schema_based_sharding.sql b/src/test/regress/sql/schema_based_sharding.sql index f0b2276df00..fc12c6fda2c 100644 --- a/src/test/regress/sql/schema_based_sharding.sql +++ b/src/test/regress/sql/schema_based_sharding.sql @@ -1003,14 +1003,6 @@ DROP ROLE test_non_super_user; \c - - - :worker_1_port --- test creating a tenant table from workers -CREATE TABLE tenant_3.tbl_1(a int, b text); - --- test creating a tenant schema from workers -SET citus.enable_schema_based_sharding TO ON; -CREATE SCHEMA worker_tenant_schema; -SET citus.enable_schema_based_sharding TO OFF; - -- Enable the GUC on workers to make sure that the CREATE SCHEMA/ TABLE -- commands that we send to workers don't recursively try creating a -- tenant schema / table. @@ -1022,12 +1014,15 @@ SELECT pg_reload_conf(); ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON; SELECT pg_reload_conf(); --- Verify that citus_internal.unregister_tenant_schema_globally is a no-op --- on workers. +-- Verify that citus_internal.unregister_tenant_schema_globally can be called +-- from workers too, but it will fail for this case as we haven't dropped the +-- schema yet. SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); \c - - - :master_port +SET client_min_messages TO WARNING; + SET search_path TO regular_schema; SET citus.next_shard_id TO 1950000; SET citus.shard_count TO 32; diff --git a/src/test/regress/sql/schema_based_sharding_from_workers_a.sql b/src/test/regress/sql/schema_based_sharding_from_workers_a.sql new file mode 100644 index 00000000000..6f12977260e --- /dev/null +++ b/src/test/regress/sql/schema_based_sharding_from_workers_a.sql @@ -0,0 +1,1383 @@ +-- This is heavily based on the schema_based_sharding.sql test file. +-- The only differences are: +-- - we don't check some of the functionality tested there (e.g., testing of some of the internal UDFs) +-- - we test schema-based sharding features (e.g., DDLs, queries, etc.) using the same SQL from workers this time +-- - when we verify things, we always make sure to do that on all nodes to ensure that we consistently sync +-- metadata changes when a command is issued from the workers too + +SET client_min_messages TO WARNING; + +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); + +SET client_min_messages TO NOTICE; + +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + +-- When creating a tenant table from workers, we always fetch the next shard id +-- and placement id from the coordinator because we never sync those sequences to +-- workers. For this reason, throughout this test file, we always set the next shard id +-- on the coordinator when needed, rather than setting it on the current worker node. +-- +-- Note that setting citus.next_shard_id on the coordinator would not work if the +-- citus internal connection we use to execute master_get_new_shardid() on the +-- coordinator changes because the underlying function, GetNextShardIdInternal(),
For this reason, we instead +-- set pg_dist_shardid_seq on the coordinator in the tests where we test creating +-- distributed tables from a worker and where we want to use consistent shard ids. +-- +-- At the end of the test file, we reset pg_dist_shardid_seq. +SELECT last_value::bigint INTO pg_dist_shardid_seq_prev_state FROM pg_catalog.pg_dist_shardid_seq; + +\c - - - :worker_1_port +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050000;$$); + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; + +SET citus.enable_schema_based_sharding TO ON; + +-- empty tenant +CREATE SCHEMA "tenant\'_1"; +CREATE SCHEMA IF NOT EXISTS "tenant\'_1"; + +-- non-empty tenant +CREATE SCHEMA "tenant\'_2"; +CREATE TABLE "tenant\'_2".test_table(a int, b text); + +-- empty tenant +CREATE SCHEMA "tenant\'_3"; +CREATE TABLE "tenant\'_3".test_table(a int, b text); +DROP TABLE "tenant\'_3".test_table; + +\c - - - :master_port + +-- add a node after creating tenant schemas +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050100;$$); + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; + +CREATE SCHEMA regular_schema; +SET search_path TO regular_schema; + +-- Verify that citus_internal.unregister_tenant_schema_globally can only +-- be called on schemas that are dropped already. +SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); + +-- show that regular_schema doesn't show up in pg_dist_schema +SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'regular_schema'; + +CREATE TABLE regular_schema.citus_local_tbl(id int); +SELECT citus_add_local_table_to_metadata('regular_schema.citus_local_tbl'); + +CREATE TABLE regular_schema.hash_dist_tbl(id int); +SELECT create_distributed_table('regular_schema.hash_dist_tbl', 'id'); + +CREATE TABLE regular_schema.ref_tbl(id int PRIMARY KEY); +SELECT create_reference_table('regular_schema.ref_tbl'); + +CREATE TABLE regular_schema.ref_tbl_1(id int PRIMARY KEY); +SELECT create_reference_table('regular_schema.ref_tbl_1'); + +CREATE TABLE regular_schema.pg_local_tbl3(id int REFERENCES regular_schema.ref_tbl_1(id)); + +CREATE TABLE regular_schema.citus_local_partitioned_table(a int, b text) PARTITION BY RANGE (a); +SELECT citus_add_local_table_to_metadata('regular_schema.citus_local_partitioned_table'); + +CREATE TABLE regular_schema.dist_partitioned_table(a int, b text) PARTITION BY RANGE (a); +SELECT create_distributed_table('regular_schema.dist_partitioned_table', 'a'); + +CREATE TABLE regular_schema.parent_attach_test_citus_local(a int, b text) PARTITION BY RANGE (a); +SELECT citus_add_local_table_to_metadata('regular_schema.parent_attach_test_citus_local'); + +CREATE TABLE regular_schema.parent_attach_test_dist(a int, b text) PARTITION BY RANGE (a); +SELECT create_distributed_table('regular_schema.parent_attach_test_dist', 'a'); + +CREATE TABLE regular_schema.child_attach_test_citus_local(a int, b text); +SELECT citus_add_local_table_to_metadata('regular_schema.child_attach_test_citus_local'); + +CREATE TABLE regular_schema.child_attach_test_dist(a int, b text); +SELECT create_distributed_table('regular_schema.child_attach_test_dist', 'a'); + +CREATE TABLE regular_schema.citus_local(a int, b text); +SELECT citus_add_local_table_to_metadata('regular_schema.citus_local'); + +CREATE TABLE regular_schema.dist(a 
int, b text);
+SELECT create_distributed_table('regular_schema.dist', 'a');
+
+CREATE TYPE regular_schema.employee_type AS (name text, salary numeric);
+
+CREATE TABLE regular_schema.reference_table(a int PRIMARY KEY);
+SELECT create_reference_table('regular_schema.reference_table');
+
+CREATE FUNCTION regular_schema.increment_one()
+RETURNS void
+LANGUAGE plpgsql
+AS $$
+BEGIN
+    UPDATE search_path_test SET a = a + 1;
+END;
+$$;
+
+CREATE FUNCTION regular_schema.decrement_one()
+RETURNS void
+LANGUAGE plpgsql
+AS $$
+BEGIN
+    UPDATE search_path_test SET a = a - 1;
+END;
+$$;
+
+CREATE SCHEMA regular_schema_1;
+CREATE TABLE regular_schema_1.dist_table(a int, b text);
+SELECT create_distributed_table('regular_schema_1.dist_table', 'a', shard_count => 4);
+
+\c - - - :worker_1_port
+
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050300;$$);
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+
+ALTER SCHEMA "tenant\'_1" RENAME TO tenant_1;
+ALTER SCHEMA "tenant\'_2" RENAME TO tenant_2;
+ALTER SCHEMA "tenant\'_3" RENAME TO tenant_3;
+
+-- verify we cannot set a tenant table's schema to a regular schema from workers
+CREATE TABLE tenant_2.test_table2(id int);
+ALTER TABLE tenant_2.test_table2 SET SCHEMA regular_schema;
+
+-- verify we can set a regular table's schema to a distributed schema
+CREATE TABLE regular_schema.test_table3(id int);
+ALTER TABLE regular_schema.test_table3 SET SCHEMA tenant_2;
+-- verify that tenant_2.test_table3 is recorded in pg_dist_partition as a single-shard table.
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition
+WHERE logicalrelid = 'tenant_2.test_table3'::regclass AND
+    partmethod = 'n' AND repmodel = 's' AND colocationid > 0;
+$$);
+-- verify that regular_schema.test_table3 does not exist
+SELECT * FROM regular_schema.test_table3;
+
+-- verify we cannot set a tenant table's schema to another distributed schema from workers
+CREATE TABLE tenant_2.test_table4(id int);
+ALTER TABLE tenant_2.test_table4 SET SCHEMA tenant_3;
+
+-- verify that we can put a local table in a regular schema into a distributed schema
+CREATE TABLE regular_schema.pg_local_tbl(id int);
+ALTER TABLE regular_schema.pg_local_tbl SET SCHEMA tenant_2;
+
+-- verify that we can put a Citus local table in a regular schema into a distributed schema
+ALTER TABLE regular_schema.citus_local_tbl SET SCHEMA tenant_2;
+
+-- verify that we do not allow putting a hash distributed table in a regular schema into a distributed schema
+ALTER TABLE regular_schema.hash_dist_tbl SET SCHEMA tenant_2;
+
+-- verify that we do not allow putting a reference table in a regular schema into a distributed schema
+ALTER TABLE regular_schema.ref_tbl SET SCHEMA tenant_2;
+
+-- verify that we cannot put a table in a tenant schema into a regular schema
+CREATE TABLE tenant_2.tenant_tbl(id int);
+ALTER TABLE tenant_2.tenant_tbl SET SCHEMA regular_schema;
+
+-- verify that we cannot put a table in a tenant schema into another tenant schema
+CREATE TABLE tenant_2.tenant_tbl2(id int);
+ALTER TABLE tenant_2.tenant_tbl2 SET SCHEMA tenant_3;
+
+-- verify that we do not allow putting a local table in a regular schema into a distributed schema if it has a foreign key to a non-reference table in another schema
+CREATE TABLE regular_schema.pg_local_tbl1(id int PRIMARY KEY);
+CREATE TABLE regular_schema.pg_local_tbl2(id int REFERENCES regular_schema.pg_local_tbl1(id));
+ALTER TABLE
regular_schema.pg_local_tbl2 SET SCHEMA tenant_2;
+
+-- verify that we allow putting a local table in a regular schema into a distributed schema if it has a foreign key to a reference table in another schema
+ALTER TABLE regular_schema.pg_local_tbl3 SET SCHEMA tenant_2;
+
+-- verify that we do not allow putting a table in a tenant schema into a regular schema if it has a foreign key to/from another table in the same schema
+DROP TABLE tenant_2.tenant_tbl2;
+
+CREATE TABLE tenant_2.tenant_tbl1(id int PRIMARY KEY);
+CREATE TABLE tenant_2.tenant_tbl2(id int REFERENCES tenant_2.tenant_tbl1(id));
+ALTER TABLE tenant_2.tenant_tbl1 SET SCHEMA regular_schema;
+ALTER TABLE tenant_2.tenant_tbl2 SET SCHEMA regular_schema;
+
+-- verify that we do not allow putting a table in a distributed schema into another distributed schema if it has a foreign key to/from another table in the same schema
+CREATE TABLE tenant_2.tenant_tbl3(id int PRIMARY KEY);
+CREATE TABLE tenant_2.tenant_tbl4(id int REFERENCES tenant_2.tenant_tbl3(id));
+ALTER TABLE tenant_2.tenant_tbl3 SET SCHEMA tenant_3;
+ALTER TABLE tenant_2.tenant_tbl4 SET SCHEMA tenant_3;
+
+-- alter set non-existent schema
+ALTER TABLE tenant_2.test_table SET SCHEMA ghost_schema;
+ALTER TABLE IF EXISTS tenant_2.test_table SET SCHEMA ghost_schema;
+-- alter set non-existent table
+ALTER TABLE tenant_2.ghost_table SET SCHEMA ghost_schema;
+ALTER TABLE IF EXISTS tenant_2.ghost_table SET SCHEMA ghost_schema;
+
+-- verify that colocation id is set for empty tenants too
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT array_agg(colocationid > 0) FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text IN ('tenant_1', 'tenant_3');
+$$);
+
+-- Verify that tenant_2.test_table is recorded in pg_dist_partition as a
+-- single-shard table.
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition
+WHERE logicalrelid = 'tenant_2.test_table'::regclass AND
+    partmethod = 'n' AND repmodel = 's' AND colocationid > 0;
+$$);
+
+-- verify that colocation id is properly set for a non-empty tenant schema
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT colocationid = (
+        SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_2.test_table'::regclass
+    )
+    FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_2';
+$$);
+
+-- create a tenant table for tenant_1 after add_node
+CREATE TABLE tenant_1.test_table(a int, b text);
+
+-- verify that colocation id is properly set for the now-non-empty tenant schema
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT colocationid = (
+        SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_1.test_table'::regclass
+    )
+    FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_1';
+$$);
+
+-- verify that tenant_1 and tenant_2 have different colocation ids
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(DISTINCT(colocationid))=2 FROM pg_dist_schema
+WHERE schemaid::regnamespace::text IN ('tenant_1', 'tenant_2');
+$$);
+
+-- verify that we don't allow creating tenant tables via the CREATE SCHEMA command
+CREATE SCHEMA schema_using_schema_elements CREATE TABLE test_table(a int, b text);
+
+CREATE SCHEMA tenant_4;
+CREATE TABLE tenant_4.tbl_1(a int, b text);
+CREATE TABLE tenant_4.tbl_2(a int, b text);
+
+-- verify that we don't allow creating a foreign table in a tenant schema, with a nice error message
+CREATE FOREIGN TABLE tenant_4.foreign_table (
+    id bigint not null,
+    full_name text not null default ''
+) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression
'true', table_name 'foreign_table');
+
+-- verify that we don't allow creating a temporary table in a tenant schema
+CREATE TEMPORARY TABLE tenant_4.temp_table (a int, b text);
+
+CREATE TABLE tenant_4.partitioned_table(a int, b text, PRIMARY KEY (a)) PARTITION BY RANGE (a);
+CREATE TABLE tenant_4.partitioned_table_child_1 PARTITION OF tenant_4.partitioned_table FOR VALUES FROM (1) TO (2);
+
+CREATE TABLE tenant_4.another_partitioned_table(a int, b text, FOREIGN KEY (a) REFERENCES tenant_4.partitioned_table(a)) PARTITION BY RANGE (a);
+CREATE TABLE tenant_4.another_partitioned_table_child PARTITION OF tenant_4.another_partitioned_table FOR VALUES FROM (1) TO (2);
+
+-- verify that we allow creating partitioned tables in a tenant schema
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition
+WHERE logicalrelid = 'tenant_4.partitioned_table_child_1'::regclass AND
+    partmethod = 'n' AND repmodel = 's' AND colocationid = (
+        SELECT colocationid FROM pg_dist_partition
+        WHERE logicalrelid = 'tenant_4.partitioned_table'::regclass);
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT EXISTS(
+    SELECT 1
+    FROM pg_inherits
+    WHERE inhrelid = 'tenant_4.partitioned_table_child_1'::regclass AND
+          inhparent = 'tenant_4.partitioned_table'::regclass
+) AS is_partition;
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition
+WHERE logicalrelid = 'tenant_4.another_partitioned_table_child'::regclass AND
+    partmethod = 'n' AND repmodel = 's' AND colocationid = (
+        SELECT colocationid FROM pg_dist_partition
+        WHERE logicalrelid = 'tenant_4.another_partitioned_table'::regclass);
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT EXISTS(
+    SELECT 1
+    FROM pg_inherits
+    WHERE inhrelid = 'tenant_4.another_partitioned_table_child'::regclass AND
+          inhparent = 'tenant_4.another_partitioned_table'::regclass
+) AS is_partition;
+$$);
+
+-- verify the foreign key between parents
+SELECT result FROM run_command_on_all_nodes($$
+SELECT EXISTS(
+    SELECT 1
+    FROM pg_constraint
+    WHERE conrelid = 'tenant_4.another_partitioned_table'::regclass AND
+          confrelid = 'tenant_4.partitioned_table'::regclass AND
+          contype = 'f'
+) AS foreign_key_exists;
+$$);
+
+-- We want to hide the error message context because the node reporting the foreign key
+-- violation might change from one run to another.
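+-- [Editor's sketch, not part of the test.] Under the default verbosity, a
+-- foreign key violation raised on a remote shard placement typically carries a
+-- CONTEXT field naming the node, along the lines of (hypothetical output):
+--
+--   ERROR:  insert or update on table "another_partitioned_table_child_xxxxxx"
+--           violates foreign key constraint "..."
+--   CONTEXT: while executing command on localhost:xxxx
+--
+-- Since either worker may be the one that reports it, the test brackets the
+-- violating INSERT below with \set VERBOSITY terse / default so that only the
+-- ERROR line, which is stable, ends up in the expected output.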
+\set VERBOSITY terse
+
+INSERT INTO tenant_4.another_partitioned_table VALUES (1, 'a');
+
+\set VERBOSITY default
+
+INSERT INTO tenant_4.partitioned_table VALUES (1, 'a');
+INSERT INTO tenant_4.another_partitioned_table VALUES (1, 'a');
+
+CREATE SCHEMA tenant_5;
+CREATE TABLE tenant_5.tbl_1(a int, b text);
+
+CREATE TABLE tenant_5.partitioned_table(a int, b text) PARTITION BY RANGE (a);
+
+-- verify that we don't allow creating a partition table that is a child of a partitioned table in a different tenant schema
+CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF tenant_5.partitioned_table FOR VALUES FROM (1) TO (2);
+
+-- verify that we don't allow creating a local partition table that is a child of a tenant partitioned table
+CREATE TABLE regular_schema.local_child_table PARTITION OF tenant_5.partitioned_table FOR VALUES FROM (1) TO (2);
+
+SET citus.use_citus_managed_tables TO ON;
+CREATE TABLE regular_schema.local_child_table PARTITION OF tenant_5.partitioned_table FOR VALUES FROM (1) TO (2);
+RESET citus.use_citus_managed_tables;
+
+CREATE TABLE regular_schema.local_partitioned_table(a int, b text) PARTITION BY RANGE (a);
+
+-- verify that we don't allow creating a partition table that is a child of a non-tenant partitioned table
+CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF regular_schema.local_partitioned_table FOR VALUES FROM (1) TO (2);
+CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF regular_schema.citus_local_partitioned_table FOR VALUES FROM (1) TO (2);
+CREATE TABLE tenant_4.partitioned_table_child_2 PARTITION OF regular_schema.dist_partitioned_table FOR VALUES FROM (1) TO (2);
+
+CREATE TABLE tenant_4.parent_attach_test(a int, b text) PARTITION BY RANGE (a);
+CREATE TABLE tenant_4.child_attach_test(a int, b text);
+
+CREATE TABLE tenant_5.parent_attach_test(a int, b text) PARTITION BY RANGE (a);
+CREATE TABLE tenant_5.child_attach_test(a int, b text);
+
+CREATE TABLE regular_schema.parent_attach_test_local(a int, b text) PARTITION BY RANGE (a);
+
+CREATE TABLE regular_schema.child_attach_test_local(a int, b text);
+
+-- verify that we don't allow attaching a tenant table to a tenant partitioned table if they are not in the same schema
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION tenant_5.child_attach_test FOR VALUES FROM (1) TO (2);
+
+-- verify that we don't allow attaching a non-tenant table to a tenant partitioned table
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION regular_schema.child_attach_test_local FOR VALUES FROM (1) TO (2);
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION regular_schema.child_attach_test_citus_local FOR VALUES FROM (1) TO (2);
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION regular_schema.child_attach_test_dist FOR VALUES FROM (1) TO (2);
+
+-- verify that we don't allow attaching a tenant table to a non-tenant partitioned table
+ALTER TABLE regular_schema.parent_attach_test_local ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
+ALTER TABLE regular_schema.parent_attach_test_citus_local ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
+ALTER TABLE regular_schema.parent_attach_test_dist ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
+
+ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION tenant_4.child_attach_test FOR VALUES FROM (1) TO (2);
+
+-- verify that we don't allow multi-level partitioning on tenant tables
+CREATE TABLE tenant_4.multi_level_test(a int, b text) PARTITION BY RANGE
(a); +ALTER TABLE tenant_4.parent_attach_test ATTACH PARTITION tenant_4.multi_level_test FOR VALUES FROM (1) TO (2); + +-- verify that we allow attaching a tenant table into a tenant partitioned table, if they are in the same schema +SELECT result FROM run_command_on_all_nodes($$ +SELECT COUNT(*)=1 FROM pg_dist_partition +WHERE logicalrelid = 'tenant_4.parent_attach_test'::regclass AND + partmethod = 'n' AND repmodel = 's' AND colocationid = ( + SELECT colocationid FROM pg_dist_partition + WHERE logicalrelid = 'tenant_4.child_attach_test'::regclass); +$$); + +SELECT result FROM run_command_on_all_nodes($$ +SELECT EXISTS( + SELECT 1 + FROM pg_inherits + WHERE inhrelid = 'tenant_4.child_attach_test'::regclass AND + inhparent = 'tenant_4.parent_attach_test'::regclass +) AS is_partition; +$$); + +-- verify that we allow detaching a tenant partition from a tenant partitioned table +ALTER TABLE tenant_4.parent_attach_test DETACH PARTITION tenant_4.child_attach_test; + +-- verify they're still sharing the same colocation group +SELECT result FROM run_command_on_all_nodes($$ +SELECT COUNT(*)=1 FROM pg_dist_partition +WHERE logicalrelid = 'tenant_4.parent_attach_test'::regclass AND + partmethod = 'n' AND repmodel = 's' AND colocationid = ( + SELECT colocationid FROM pg_dist_partition + WHERE logicalrelid = 'tenant_4.child_attach_test'::regclass); +$$); + +-- verify that they're no longer in parent-child relationship +SELECT result FROM run_command_on_all_nodes($$ +SELECT NOT EXISTS( + SELECT 1 + FROM pg_inherits + WHERE inhrelid = 'tenant_4.child_attach_test'::regclass AND + inhparent = 'tenant_4.parent_attach_test'::regclass +) AS is_partition; +$$); + +-- errors out because shard replication factor > 1 +SET citus.shard_replication_factor TO 2; +CREATE TABLE tenant_4.tbl_3 AS SELECT 1 AS a, 'text' as b; +SET citus.shard_replication_factor TO 1; +-- verify that we allow creating tenant tables by using CREATE TABLE AS / SELECT INTO commands +CREATE TABLE tenant_4.tbl_3 AS SELECT 1 AS a, 'text' as b; +CREATE TEMP TABLE IF NOT EXISTS tenant_4.tbl_4 AS SELECT 1 as a, 'text' as b; +CREATE UNLOGGED TABLE IF NOT EXISTS tenant_4.tbl_4 AS SELECT 1 as a, 'text' as b WITH NO DATA; +-- the same command, no changes because of IF NOT EXISTS +CREATE UNLOGGED TABLE IF NOT EXISTS tenant_4.tbl_4 AS SELECT 1 as a, 'text' as b WITH NO DATA; +SELECT 1 as a, 'text' as b INTO tenant_4.tbl_5; + +-- verify we can query the newly created tenant tables from any node +SELECT result FROM run_command_on_all_nodes($$ +SELECT jsonb_agg( + jsonb_build_object( + 'a', a, + 'b', b + ) + ORDER BY a + ) + FROM tenant_4.tbl_3 +$$); +SELECT result FROM run_command_on_all_nodes($$ +SELECT COUNT(*) FROM tenant_4.tbl_5 +$$); + +-- verify that we don't allow creating tenant tables by using CREATE TABLE OF commands +CREATE TABLE tenant_4.employees OF regular_schema.employee_type ( + PRIMARY KEY (name), + salary WITH OPTIONS DEFAULT 1000 +); + +-- verify that we act accordingly when if not exists is used +CREATE TABLE IF NOT EXISTS tenant_4.tbl_6(a int, b text); +CREATE TABLE IF NOT EXISTS tenant_4.tbl_6(a int, b text); + +SELECT result FROM run_command_on_all_nodes($$ +SELECT jsonb_agg( + jsonb_build_object( + 'logicalrelid', logicalrelid, + 'partmethod', partmethod + ) + ORDER BY logicalrelid::text + ) + FROM pg_dist_partition + WHERE logicalrelid::text LIKE 'tenant_4.tbl%' +$$); + +CREATE TABLE regular_schema.local(a int, b text); + +-- verify that we can create a table LIKE another table +CREATE TABLE tenant_5.test_table_like_1(LIKE 
tenant_5.tbl_1); -- using a table from the same schema +CREATE TABLE tenant_5.test_table_like_2(LIKE tenant_4.tbl_1); -- using a table from another schema +CREATE TABLE tenant_5.test_table_like_3(LIKE regular_schema.local); -- using a local table +CREATE TABLE tenant_5.test_table_like_4(LIKE regular_schema.citus_local); -- using a citus local table +CREATE TABLE tenant_5.test_table_like_5(LIKE regular_schema.dist); -- using a distributed table + +-- verify that all of them are converted to tenant tables +SELECT result FROM run_command_on_all_nodes($$ +SELECT COUNT(*) = 5 +FROM pg_dist_partition +WHERE logicalrelid::text LIKE 'tenant_5.test_table_like_%' AND + partmethod = 'n' AND repmodel = 's' AND colocationid = ( + SELECT colocationid FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'tenant_5' + ); +$$); + +CREATE TABLE regular_schema.local_table_using_like(LIKE tenant_5.tbl_1); + +-- verify that regular_schema.local_table_using_like is not a tenant table +SELECT COUNT(*) = 0 FROM pg_dist_partition +WHERE logicalrelid = 'regular_schema.local_table_using_like'::regclass; + +-- verify that INHERITS syntax is not supported when creating a tenant table +CREATE TABLE tenant_5.test_table_inherits_1(x int) INHERITS (tenant_5.tbl_1); -- using a table from the same schema +CREATE TABLE tenant_5.test_table_inherits_2(x int) INHERITS (tenant_4.tbl_1); -- using a table from another schema +CREATE TABLE tenant_5.test_table_inherits_3(x int) INHERITS (regular_schema.local); -- using a local table +CREATE TABLE tenant_5.test_table_inherits_4(x int) INHERITS (regular_schema.citus_local); -- using a citus local table +CREATE TABLE tenant_5.test_table_inherits_5(x int) INHERITS (regular_schema.dist); -- using a distributed table + +-- verify that INHERITS syntax is not supported when creating a local table based on a tenant table +CREATE TABLE regular_schema.local_table_using_inherits(x int) INHERITS (tenant_5.tbl_1); + +CREATE TABLE tenant_5.tbl_2(a int, b text); + +CREATE SCHEMA "CiTuS.TeeN_108"; +ALTER SCHEMA "CiTuS.TeeN_108" RENAME TO citus_teen_proper; + +SELECT schemaid AS citus_teen_schemaid FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'citus_teen_proper' \gset +SELECT colocationid AS citus_teen_colocationid FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'citus_teen_proper' \gset + +SELECT result FROM run_command_on_all_nodes($$ + SELECT schemaid INTO citus_teen_schemaid FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'citus_teen_proper' +$$); + +SELECT result FROM run_command_on_all_nodes($$ + SELECT colocationid INTO citus_teen_colocationid FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'citus_teen_proper' +$$); + +-- verify that colocation id is set for the tenant with a weird name too +SELECT :citus_teen_colocationid > 0; + +-- verify that the same colocation id is used on other nodes too +SELECT format( + 'SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_dist_schema + WHERE schemaid::regnamespace::text = ''citus_teen_proper'' AND + colocationid = %s; + $$);', +:citus_teen_colocationid) AS verify_all_nodes_query \gset + +:verify_all_nodes_query + +ALTER SCHEMA citus_teen_proper RENAME TO "CiTuS.TeeN_108"; + +SET citus.enable_schema_based_sharding TO OFF; + +-- Show that the tables created in tenant schemas are considered to be +-- tenant tables even if the GUC was set to off when creating the table. 
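+-- [Editor's sketch, not part of the test.] Whether a new table becomes a
+-- tenant table is keyed on the schema's presence in pg_dist_schema, not on the
+-- GUC value at CREATE TABLE time, so one would expect something like:
+--
+--   SET citus.enable_schema_based_sharding TO OFF;
+--   CREATE TABLE tenant_5.some_tbl(a int);       -- hypothetical table name
+--   SELECT COUNT(*) FROM pg_dist_schema
+--   WHERE schemaid = 'tenant_5'::regnamespace;   -- still 1
+--
+-- to yield a tenant table anyway; tenant_5.tbl_3 below demonstrates exactly
+-- this on all nodes.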
+CREATE TABLE tenant_5.tbl_3(a int, b text);
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=1 FROM pg_dist_partition WHERE logicalrelid = 'tenant_5.tbl_3'::regclass;
+$$);
+
+SET citus.enable_schema_based_sharding TO ON;
+
+-- Verify that tables that belong to tenant_4 and tenant_5 are stored on
+-- different worker nodes due to the order we followed when creating the first
+-- tenant tables in each of them.
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(DISTINCT(nodename, nodeport))=2 FROM citus_shards
+WHERE table_name IN ('tenant_4.tbl_1'::regclass, 'tenant_5.tbl_1'::regclass);
+$$);
+
+-- show that all the tables in tenant_4 are colocated with each other.
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition
+WHERE logicalrelid::regclass::text LIKE 'tenant_4.%';
+$$);
+
+-- verify the same for tenant_5 too
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(DISTINCT(colocationid))=1 FROM pg_dist_partition
+WHERE logicalrelid::regclass::text LIKE 'tenant_5.%';
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT schemaid INTO tenant_4_schemaid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_4'
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT colocationid INTO tenant_4_colocationid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_4'
+$$);
+
+SET client_min_messages TO WARNING;
+
+-- Rename it to a name that contains a single quote to verify that we properly
+-- escape its name when sending the command to delete the pg_dist_schema
+-- entry on workers.
+ALTER SCHEMA tenant_4 RENAME TO "tenant\'_4";
+
+DROP SCHEMA "tenant\'_4", "CiTuS.TeeN_108" CASCADE;
+
+SET client_min_messages TO NOTICE;
+
+-- Verify that dropping a tenant schema deletes the associated
+-- pg_dist_schema entry and pg_dist_colocation too.
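+-- [Editor's sketch, not part of the test.] The verification pattern used here
+-- (and repeated for the later drop tests) is: snapshot the catalog ids into
+-- throwaway tables on every node before the DROP, then assert that the live
+-- catalogs no longer contain them, roughly:
+--
+--   -- before: SELECT schemaid INTO <schema>_schemaid FROM pg_dist_schema ...
+--   -- after:  SELECT COUNT(*)=0 FROM pg_dist_schema
+--   --         WHERE schemaid = (SELECT schemaid FROM <schema>_schemaid);
+--
+-- The snapshots are necessary because once the schema is gone its name can no
+-- longer be resolved to an oid for the lookup. The actual checks follow.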
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(*)=0 FROM pg_dist_schema
+    WHERE schemaid = (SELECT schemaid FROM tenant_4_schemaid)
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(*)=0 FROM pg_dist_schema
+    WHERE schemaid = (SELECT schemaid FROM citus_teen_schemaid)
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(*)=0 FROM pg_dist_schema
+    WHERE colocationid = (SELECT colocationid FROM tenant_4_colocationid)
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(*)=0 FROM pg_dist_schema
+    WHERE colocationid = (SELECT colocationid FROM citus_teen_colocationid)
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    DROP TABLE tenant_4_schemaid, citus_teen_schemaid, tenant_4_colocationid, citus_teen_colocationid
+$$);
+
+\c - - - :master_port
+
+SET client_min_messages TO NOTICE;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050400;$$);
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+
+-- show that we don't allow colocating a Citus table with a tenant table
+CREATE TABLE regular_schema.null_shard_key_1(a int, b text);
+SELECT create_distributed_table('regular_schema.null_shard_key_1', null, colocate_with => 'tenant_5.tbl_2');
+SELECT create_distributed_table('regular_schema.null_shard_key_1', 'a', colocate_with => 'tenant_5.tbl_2');
+
+CREATE TABLE regular_schema.null_shard_key_table_2(a int, b text);
+SELECT create_distributed_table('regular_schema.null_shard_key_table_2', null);
+
+-- let's switch to a different worker node for the rest of the tests
+\c - - - :worker_2_port
+
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050500;$$);
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+
+-- Show that we don't choose to colocate regular single-shard tables with
+-- tenant tables by default.
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE colocationid = (
+    SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'regular_schema.null_shard_key_table_2'::regclass
+);
+$$);
+
+-- save the colocation id used for tenant_5
+SELECT colocationid AS tenant_5_old_colocationid FROM pg_dist_schema
+WHERE schemaid::regnamespace::text = 'tenant_5' \gset
+
+-- drop all the tables that belong to tenant_5 and create a new one
+DROP TABLE tenant_5.tbl_1, tenant_5.tbl_2, tenant_5.tbl_3;
+CREATE TABLE tenant_5.tbl_4(a int, b text);
+
+-- verify that tenant_5 is still associated with the same colocation id
+SELECT format(
+    'SELECT result FROM run_command_on_all_nodes($$
+        SELECT colocationid = %s FROM pg_dist_schema
+        WHERE schemaid::regnamespace::text = ''tenant_5'';
+    $$);',
+:tenant_5_old_colocationid) AS verify_all_nodes_query \gset
+
+:verify_all_nodes_query
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT schemaid INTO tenant_1_schemaid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_1'
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT schemaid INTO tenant_2_schemaid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_2'
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT colocationid INTO tenant_1_colocationid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_1'
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT colocationid INTO tenant_2_colocationid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_2'
+$$);
+
+SET client_min_messages TO WARNING;
+SET citus.enable_schema_based_sharding TO OFF;
+
+DROP SCHEMA tenant_1 CASCADE;
+
+CREATE ROLE test_non_super_user;
+ALTER ROLE test_non_super_user NOSUPERUSER;
+
+ALTER SCHEMA tenant_2 OWNER TO non_existing_role;
+ALTER SCHEMA tenant_2 OWNER TO test_non_super_user;
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT pg_get_userbyid(nspowner) AS schema_owner
+    FROM pg_namespace
+    WHERE nspname = 'tenant_2'
+$$);
+
+\c - - - :master_port
+
+SET client_min_messages TO WARNING;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050600;$$);
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+
+DROP OWNED BY test_non_super_user CASCADE;
+
+\c - - - :worker_2_port
+
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2050700;$$);
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+
+DROP ROLE test_non_super_user;
+
+-- Verify that dropping a tenant schema always deletes
+-- the associated pg_dist_schema entry even if the schema was
+-- dropped while the GUC was set to off.
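+-- [Editor's sketch, not part of the test.] The drop path does not consult
+-- citus.enable_schema_based_sharding at all; unregistration is driven by the
+-- schema's own pg_dist_schema entry. So even a plain
+--
+--   SET citus.enable_schema_based_sharding TO OFF;
+--   DROP SCHEMA tenant_1 CASCADE;   -- as done above
+--
+-- must remove the tenant_1 row from pg_dist_schema (and its colocation group)
+-- on every node, which is what the checks below assert.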
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(*)=0 FROM pg_dist_schema
+    WHERE schemaid IN (SELECT schemaid FROM tenant_1_schemaid UNION SELECT schemaid FROM tenant_2_schemaid)
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(*)=0 FROM pg_dist_schema
+    WHERE colocationid IN (SELECT colocationid FROM tenant_1_colocationid UNION SELECT colocationid FROM tenant_2_colocationid)
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    DROP TABLE tenant_1_schemaid, tenant_2_schemaid, tenant_1_colocationid, tenant_2_colocationid
+$$);
+
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+
+-- show that all schemaid values are unique and non-null in pg_dist_schema
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid IS NULL;
+SELECT (SELECT COUNT(*) FROM pg_dist_schema) =
+       (SELECT COUNT(DISTINCT(schemaid)) FROM pg_dist_schema);
+$$);
+
+-- show that all colocationid values are unique and non-null in pg_dist_schema
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE colocationid IS NULL;
+SELECT (SELECT COUNT(*) FROM pg_dist_schema) =
+       (SELECT COUNT(DISTINCT(colocationid)) FROM pg_dist_schema);
+$$);
+
+CREATE TABLE public.cannot_be_a_tenant_table(a int, b text);
+
+-- show that we don't consider the public schema as a tenant schema
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'public';
+$$);
+
+DROP TABLE public.cannot_be_a_tenant_table;
+
+CREATE TEMPORARY TABLE temp_table(a int, b text);
+
+-- show that we don't consider temporary schemas as tenant schemas
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text LIKE '%pg_temp%';
+$$);
+
+DROP TABLE temp_table;
+
+-- test creating a tenant schema and a tenant table for it in the same transaction
+BEGIN;
+    CREATE SCHEMA tenant_7;
+    CREATE TABLE tenant_7.tbl_1(a int, b text);
+    CREATE TABLE tenant_7.tbl_2(a int, b text);
+
+    SELECT colocationid = (
+        SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_7.tbl_1'::regclass
+    )
+    FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_7';
+
+    -- make sure that both tables created in tenant_7 are colocated
+    SELECT COUNT(DISTINCT(colocationid)) = 1 FROM pg_dist_partition
+    WHERE logicalrelid IN ('tenant_7.tbl_1'::regclass, 'tenant_7.tbl_2'::regclass);
+COMMIT;
+
+-- Test creating a tenant schema and a tenant table for it in the same transaction,
+-- but this time roll back the transaction.
+BEGIN;
+    CREATE SCHEMA tenant_8;
+    CREATE TABLE tenant_8.tbl_1(a int, b text);
+    CREATE TABLE tenant_8.tbl_2(a int, b text);
+ROLLBACK;
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'tenant_8';
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=0 FROM pg_dist_partition WHERE logicalrelid::text LIKE 'tenant_8.%';
+$$);
+
+-- Verify that the citus.enable_schema_based_sharding and citus.use_citus_managed_tables
+-- GUCs don't interfere with each other when creating a table in a tenant schema.
+--
+-- In the utility hook, we check whether the CREATE TABLE command is issued on a tenant
+-- schema before checking whether citus.use_citus_managed_tables is set to ON to
+-- avoid converting the table into a Citus managed table unnecessarily.
+--
+-- If the CREATE TABLE command is issued on a tenant schema, we skip the check
+-- for citus.use_citus_managed_tables.
+SET citus.use_citus_managed_tables TO ON;
+CREATE TABLE tenant_7.tbl_3(a int, b text, PRIMARY KEY(a));
+RESET citus.use_citus_managed_tables;
+
+-- Verify that we don't unnecessarily convert a table into a Citus managed
+-- table when creating it with a pre-defined foreign key to a reference table.
+
+-- Notice that tenant_7.tbl_4 has foreign keys both to tenant_7.tbl_3 and
+-- to reference_table.
+CREATE TABLE tenant_7.tbl_4(a int REFERENCES regular_schema.reference_table, FOREIGN KEY(a) REFERENCES tenant_7.tbl_3(a) ON DELETE CASCADE);
+
+INSERT INTO tenant_7.tbl_3 VALUES (1, 'a'), (2, 'b'), (3, 'c');
+INSERT INTO regular_schema.reference_table VALUES (1), (2), (3);
+INSERT INTO tenant_7.tbl_4 VALUES (1), (2), (3);
+
+DELETE FROM tenant_7.tbl_3 WHERE a < 3;
+SELECT * FROM tenant_7.tbl_4 ORDER BY a;
+
+SELECT result FROM run_command_on_all_nodes($$
+SELECT COUNT(*)=2 FROM pg_dist_partition
+WHERE logicalrelid IN ('tenant_7.tbl_3'::regclass, 'tenant_7.tbl_4'::regclass) AND
+    partmethod = 'n' AND repmodel = 's' AND
+    colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'tenant_7.tbl_1'::regclass);
+$$);
+
+CREATE TABLE local_table(a int PRIMARY KEY);
+
+-- fails because tenant tables cannot have foreign keys to local tables
+CREATE TABLE tenant_7.tbl_5(a int REFERENCES local_table(a));
+
+-- Fails because tenant tables cannot have foreign keys to tenant tables
+-- that belong to different tenant schemas.
+CREATE TABLE tenant_5.tbl_5(a int, b text, FOREIGN KEY(a) REFERENCES tenant_7.tbl_3(a));
+
+CREATE SCHEMA tenant_9;
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT schemaid INTO tenant_9_schemaid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_9'
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT colocationid INTO tenant_9_colocationid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_9'
+$$);
+
+DROP SCHEMA tenant_9;
+
+-- Make sure that dropping an empty tenant schema
+-- doesn't leave any dangling entries in pg_dist_schema and
+-- pg_dist_colocation.
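+-- [Editor's sketch, not part of the test.] Even a tenant schema with no tables
+-- gets a colocation group allocated at CREATE SCHEMA time, e.g. (hypothetical
+-- values):
+--
+--   CREATE SCHEMA tenant_9;
+--   SELECT colocationid FROM pg_dist_schema
+--   WHERE schemaid = 'tenant_9'::regnamespace;   -- e.g. 123457
+--
+-- so dropping it must clean up pg_dist_colocation as well as pg_dist_schema;
+-- both are checked below on all nodes.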
+SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=0 FROM pg_dist_schema + WHERE schemaid = (SELECT schemaid FROM tenant_9_schemaid) +$$); + +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=0 FROM pg_dist_colocation + WHERE colocationid = (SELECT colocationid FROM tenant_9_colocationid) +$$); + +SELECT result FROM run_command_on_all_nodes($$ + DROP TABLE tenant_9_schemaid, tenant_9_colocationid +$$); + +CREATE TABLE tenant_3.search_path_test(a int); +INSERT INTO tenant_3.search_path_test VALUES (1), (10); + +CREATE TABLE tenant_5.search_path_test(a int); +INSERT INTO tenant_5.search_path_test VALUES (2); + +CREATE TABLE tenant_7.search_path_test(a int); +INSERT INTO tenant_7.search_path_test VALUES (3); + +SET search_path TO tenant_5; + +PREPARE list_tuples AS SELECT * FROM search_path_test ORDER BY a; + +SELECT * FROM search_path_test ORDER BY a; + +SET search_path TO tenant_3; +DELETE FROM search_path_test WHERE a = 1; +SELECT * FROM search_path_test ORDER BY a; +SELECT regular_schema.increment_one(); +EXECUTE list_tuples; + +SET search_path TO tenant_7; +DROP TABLE search_path_test; +SELECT * FROM pg_dist_partition WHERE logicalrelid::text = 'search_path_test'; + +SET search_path TO tenant_5; +SELECT regular_schema.decrement_one(); +EXECUTE list_tuples; + +SET search_path TO regular_schema; + +CREATE USER test_other_super_user WITH superuser; + +\c - test_other_super_user + +SET citus.enable_schema_based_sharding TO ON; +CREATE SCHEMA tenant_9; + +\c - postgres + +SET search_path TO regular_schema; +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2060000;$$); + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +SET client_min_messages TO NOTICE; +SET citus.enable_schema_based_sharding TO ON; + +SELECT result FROM run_command_on_all_nodes($$ + SELECT schemaid INTO tenant_9_schemaid FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'tenant_9' +$$); + +SELECT result FROM run_command_on_all_nodes($$ + SELECT colocationid INTO tenant_9_colocationid FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'tenant_9' +$$); + +\c - - - :master_port + +SET client_min_messages TO WARNING; +DROP OWNED BY test_other_super_user; + +\c - - - :worker_2_port + +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2060100;$$); + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +SET client_min_messages TO NOTICE; +SET citus.enable_schema_based_sharding TO ON; + +-- Make sure that dropping an empty tenant schema +-- (via DROP OWNED BY) doesn't leave any dangling entries in +-- pg_dist_schema and pg_dist_colocation. 
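+-- [Editor's sketch, not part of the test.] DROP OWNED BY reaches the schema
+-- drop through a different code path than a direct DROP SCHEMA: it enumerates
+-- the role's objects and drops them in bulk, i.e. the effect of
+--
+--   DROP OWNED BY test_other_super_user;   -- executed on the coordinator above
+--
+-- is expected to trigger the same tenant-schema unregistration, which is why
+-- the cleanup checks are repeated below for this path.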
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(*)=0 FROM pg_dist_schema
+    WHERE schemaid = (SELECT schemaid FROM tenant_9_schemaid)
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(*)=0 FROM pg_dist_colocation
+    WHERE colocationid = (SELECT colocationid FROM tenant_9_colocationid)
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    DROP TABLE tenant_9_schemaid, tenant_9_colocationid
+$$);
+
+DROP USER test_other_super_user;
+
+CREATE ROLE test_non_super_user WITH LOGIN;
+ALTER ROLE test_non_super_user NOSUPERUSER;
+
+\c - - - :master_port
+
+GRANT CREATE ON DATABASE regression TO test_non_super_user;
+
+GRANT CREATE ON SCHEMA public TO test_non_super_user;
+
+SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2070000;$$);
+
+\c - test_non_super_user - :worker_2_port
+
+SET search_path TO regular_schema;
+
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+SET client_min_messages TO NOTICE;
+SET citus.enable_schema_based_sharding TO ON;
+
+-- test create / drop tenant schema / table
+
+CREATE SCHEMA tenant_10;
+CREATE TABLE tenant_10.tbl_1(a int, b text);
+CREATE TABLE tenant_10.tbl_2(a int, b text);
+
+DROP TABLE tenant_10.tbl_2;
+
+CREATE SCHEMA tenant_11;
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT schemaid INTO tenant_10_schemaid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_10'
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT schemaid INTO tenant_11_schemaid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_11'
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT colocationid INTO tenant_10_colocationid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_10'
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT colocationid INTO tenant_11_colocationid FROM pg_dist_schema
+    WHERE schemaid::regnamespace::text = 'tenant_11'
+$$);
+
+-- Verify metadata for tenant schemas that are created via a non-superuser.
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(DISTINCT(schemaid))=2 FROM pg_dist_schema
+    WHERE schemaid IN (SELECT schemaid FROM tenant_10_schemaid UNION SELECT schemaid FROM tenant_11_schemaid)
+$$);
+
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT COUNT(DISTINCT(colocationid))=2 FROM pg_dist_schema
+    WHERE colocationid IN (SELECT colocationid FROM tenant_10_colocationid UNION SELECT colocationid FROM tenant_11_colocationid)
+$$);
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA tenant_10, tenant_11 CASCADE;
+SET client_min_messages TO NOTICE;
+
+-- Verify that dropping a tenant schema via a non-superuser
+-- deletes the associated pg_dist_schema entry.
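+-- [Editor's sketch, not part of the test.] Note that the non-superuser flow
+-- above only relies on ordinary Postgres privileges, essentially:
+--
+--   GRANT CREATE ON DATABASE regression TO test_non_super_user;  -- CREATE SCHEMA
+--   GRANT CREATE ON SCHEMA public TO test_non_super_user;
+--
+-- and Citus is expected to propagate the tenant schema metadata (and its
+-- removal) the same way as for superusers; the checks below assert the
+-- drop-time cleanup on all nodes.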
+SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=0 FROM pg_dist_schema + WHERE schemaid IN (SELECT schemaid FROM tenant_10_schemaid UNION SELECT schemaid FROM tenant_11_schemaid) +$$); + +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=0 FROM pg_dist_colocation + WHERE colocationid IN (SELECT colocationid FROM tenant_10_colocationid UNION SELECT colocationid FROM tenant_11_colocationid) +$$); + +SELECT result FROM run_command_on_all_nodes($$ + DROP TABLE tenant_10_schemaid, tenant_11_schemaid, tenant_10_colocationid, tenant_11_colocationid +$$); + +\c - postgres - :master_port + +REVOKE CREATE ON DATABASE regression FROM test_non_super_user; + +REVOKE CREATE ON SCHEMA public FROM test_non_super_user; + +DROP ROLE test_non_super_user; + +-- Enable the GUC on all nodes to make sure that the CREATE SCHEMA/ TABLE +-- commands that we send to workers don't recursively try creating a +-- tenant schema / table. + +\c - - - :master_port + +ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON; +SELECT pg_reload_conf(); + +\c - - - :worker_1_port + +ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON; +SELECT pg_reload_conf(); + +\c - - - :worker_2_port + +ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON; +SELECT pg_reload_conf(); + +-- Verify that citus_internal.unregister_tenant_schema_globally can be called +-- from workers too, but it will fail for this case as we didn't yet drop the +-- schema. +SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); + +SET search_path TO regular_schema; +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080000;$$); + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +SET client_min_messages TO NOTICE; + +CREATE TABLE tenant_3.tbl_1(a int, b text); + +SET citus.enable_schema_based_sharding TO ON; +CREATE SCHEMA tenant_6; +CREATE TABLE tenant_6.tbl_1(a int, b text); + +-- verify pg_dist_partition entries for tenant_3.tbl_1 and tenant_6.tbl_1 +SELECT result FROM run_command_on_all_nodes($$ +SELECT COUNT(*)=2 FROM pg_dist_partition +WHERE logicalrelid IN ('tenant_3.tbl_1'::regclass, 'tenant_6.tbl_1'::regclass) AND + partmethod = 'n' AND repmodel = 's' AND colocationid > 0; +$$); + +\c - - - :master_port + +ALTER SYSTEM RESET citus.enable_schema_based_sharding; +SELECT pg_reload_conf(); + +\c - - - :worker_1_port + +ALTER SYSTEM RESET citus.enable_schema_based_sharding; +SELECT pg_reload_conf(); + +\c - - - :worker_2_port +SET search_path TO regular_schema; + +SET citus.enable_schema_based_sharding TO ON; +SET search_path TO regular_schema; +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080200;$$); + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +SET client_min_messages TO NOTICE; + +CREATE SCHEMA type_sch; +CREATE TABLE type_sch.tbl (a INT); + +SELECT result FROM run_command_on_all_nodes($$ +SELECT jsonb_agg( + jsonb_build_object( + 'table_name', table_name, + 'citus_table_type', citus_table_type + ) + ORDER BY table_name::text + ) +FROM public.citus_tables WHERE table_name::text LIKE 'type_sch.tbl'; +$$); + +SELECT format( + 'SELECT result FROM run_command_on_all_nodes($$ + SELECT jsonb_agg( + jsonb_build_object( + ''table_name'', table_name, + ''citus_table_type'', citus_table_type + ) + ORDER BY table_name::text + ) + FROM citus_shards WHERE table_name::text LIKE ''type_sch.tbl'' AND nodeport IN (%s, %s); + $$);', 
+:worker_1_port, :worker_2_port) AS verify_all_nodes_query \gset + +:verify_all_nodes_query + +RESET citus.enable_schema_based_sharding; + +-- test citus_schemas +SET citus.enable_schema_based_sharding TO ON; +CREATE USER citus_schema_role SUPERUSER; + +SET ROLE citus_schema_role; + +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080400;$$); + +CREATE SCHEMA citus_sch1; +CREATE TABLE citus_sch1.tbl1(a INT); +CREATE TABLE citus_sch1.tbl2(a INT); + +RESET ROLE; + +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080500;$$); + +CREATE SCHEMA citus_sch2; +CREATE TABLE citus_sch2.tbl1(a INT); +SET citus.enable_schema_based_sharding TO OFF; + +INSERT INTO citus_sch1.tbl1 SELECT * FROM generate_series(1, 10000); +INSERT INTO citus_sch1.tbl2 SELECT * FROM generate_series(1, 5000); + +INSERT INTO citus_sch2.tbl1 SELECT * FROM generate_series(1, 12000); + +SELECT result FROM run_command_on_all_nodes($$ + + +SELECT jsonb_agg( + jsonb_build_object( + 'schema_name', cs.schema_name, + 'correct_colocation_id', cs.colocation_id = ctc.colocation_id, + 'correct_size', cs.schema_size = ctc.calculated_size, + 'schema_owner', cs.schema_owner + ) + ORDER BY schema_name::text + ) +FROM public.citus_schemas cs +JOIN +( + SELECT + c.relnamespace, ct.colocation_id, + pg_size_pretty(sum(citus_total_relation_size(ct.table_name))) AS calculated_size + FROM public.citus_tables ct, pg_class c + WHERE ct.table_name::oid = c.oid + GROUP BY 1, 2 +) ctc ON cs.schema_name = ctc.relnamespace +WHERE cs.schema_name::text LIKE 'citus\_sch_' +$$); + +-- test empty schema and empty tables +SET citus.enable_schema_based_sharding TO ON; +CREATE SCHEMA citus_empty_sch1; + +CREATE SCHEMA citus_empty_sch2; +CREATE TABLE citus_empty_sch2.tbl1(a INT); +SET citus.enable_schema_based_sharding TO OFF; + +SELECT result FROM run_command_on_all_nodes($$ +SELECT jsonb_agg( + jsonb_build_object( + 'schema_name', schema_name, + 'schema_size', schema_size + ) + ORDER BY schema_name::text + ) +FROM public.citus_schemas +WHERE schema_name::text LIKE 'citus\_empty\_sch_'; +$$); + +-- test with non-privileged role +CREATE USER citus_schema_nonpri; +SET ROLE citus_schema_nonpri; + +SET client_min_messages TO ERROR; +SELECT result FROM run_command_on_all_nodes($$ +SELECT jsonb_agg( + jsonb_build_object( + 'schema_name', schema_name, + 'colocation_id_visible', colocation_id > 0, + 'schema_size_visible', schema_size IS NOT NULL, + 'schema_owner', schema_owner + ) + ORDER BY schema_name::text + ) +FROM public.citus_schemas WHERE schema_name::text LIKE 'citus\_sch_'; +$$); + +RESET client_min_messages; +RESET ROLE; + +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2080600;$$); + +-- test we handle create schema with authorization properly for distributed schema +SET citus.enable_schema_based_sharding TO ON; +CREATE ROLE authschema; +CREATE SCHEMA AUTHORIZATION authschema; +SET citus.enable_schema_based_sharding TO OFF; + +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 + FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'authschema'; +$$); + +-- mat view can be created under tenant schema +SET citus.enable_schema_based_sharding TO ON; +SET citus.shard_replication_factor TO 1; +CREATE SCHEMA sc1; +CREATE TABLE sc1.t1 (a int); +CREATE MATERIALIZED VIEW sc1.v1 AS SELECT * FROM sc1.t1; +SET citus.enable_schema_based_sharding TO OFF; + +SELECT result FROM run_command_on_all_nodes($$ +SELECT 
colocationid > 0 FROM pg_dist_schema +WHERE schemaid::regnamespace::text = 'sc1'; +$$); + +SET client_min_messages TO WARNING; +DROP TABLE public.local_table; + +-- On all nodes, save metadata records related to regular_schema and regular_schema_1 +-- for later verification of cleanup after dropping these propagated schemas from +-- workers. +SELECT result FROM run_command_on_all_nodes($$ + SELECT logicalrelid INTO expect_pg_dist_partition_cleanup + FROM pg_dist_partition + JOIN pg_class ON logicalrelid = pg_class.oid + JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid + WHERE pg_namespace.nspname IN ('regular_schema', 'regular_schema_1'); +$$); + +SELECT result FROM run_command_on_all_nodes($$ + SELECT shardid INTO expect_pg_dist_shard_cleanup + FROM pg_dist_shard + JOIN pg_dist_partition ON pg_dist_shard.logicalrelid = pg_dist_partition.logicalrelid + JOIN pg_class ON pg_dist_partition.logicalrelid = pg_class.oid + JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid + WHERE pg_namespace.nspname IN ('regular_schema', 'regular_schema_1'); +$$); + +SELECT result FROM run_command_on_all_nodes($$ + SELECT placementid INTO expect_pg_dist_placement_cleanup + FROM pg_dist_placement + JOIN pg_dist_shard ON pg_dist_placement.shardid = pg_dist_shard.shardid + JOIN pg_dist_partition ON pg_dist_shard.logicalrelid = pg_dist_partition.logicalrelid + JOIN pg_class ON pg_dist_partition.logicalrelid = pg_class.oid + JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid + WHERE pg_namespace.nspname IN ('regular_schema', 'regular_schema_1'); +$$); + +CREATE SCHEMA local_schema; + +-- show that we allow dropping distributed schemas from workers together with +-- regular propagated schemas +DROP SCHEMA tenant_5, regular_schema, tenant_3, local_schema CASCADE; + +-- cannot drop non-schema-distributed tables together with schema-distributed tables from workers +DROP TABLE tenant_7.tbl_1, regular_schema_1.dist_table; + +-- can drop tables from multiple distributed schemas together +DROP TABLE IF EXISTS tenant_7.tbl_1, tenant_6.tbl_1, tenant_7.tbl_2, tenant_7.tbl_3, tenant_7.tbl_4, does_not_exists; + +-- can drop multiple distributed schemas together +DROP SCHEMA tenant_7, tenant_6, type_sch, citus_sch1, citus_sch2, citus_empty_sch1, citus_empty_sch2, authschema, sc1 CASCADE; + +-- can drop a regular propagated schema from worker too +DROP SCHEMA regular_schema_1 CASCADE; + +DROP ROLE citus_schema_role, citus_schema_nonpri, authschema; + +-- verify that metadata related to regular_schema and regular_schema_1 +-- are cleaned up properly on all nodes +SELECT result FROM run_command_on_all_nodes($$ + SELECT 0 = ( + SELECT COUNT(*) FROM pg_dist_partition + JOIN expect_pg_dist_partition_cleanup + ON pg_dist_partition.logicalrelid = expect_pg_dist_partition_cleanup.logicalrelid + ) + ( + SELECT COUNT(*) FROM pg_dist_shard + JOIN expect_pg_dist_shard_cleanup + ON pg_dist_shard.shardid = expect_pg_dist_shard_cleanup.shardid + ) + ( + SELECT COUNT(*) FROM pg_dist_placement + JOIN expect_pg_dist_placement_cleanup + ON pg_dist_placement.placementid = expect_pg_dist_placement_cleanup.placementid + ); +$$); + +SELECT result FROM run_command_on_all_nodes($$ + DROP TABLE expect_pg_dist_partition_cleanup, + expect_pg_dist_shard_cleanup, + expect_pg_dist_placement_cleanup; +$$); + +\c - - - :master_port + +SET client_min_messages TO WARNING; + +SELECT citus_remove_node('localhost', :master_port); + +-- reset pg_dist_shardid_seq on the coordinator +DO $proc$ +DECLARE + v_last_value bigint; +BEGIN + 
SELECT last_value INTO v_last_value FROM pg_dist_shardid_seq_prev_state; + EXECUTE format('ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH %s', v_last_value); +END$proc$; + +DROP TABLE pg_dist_shardid_seq_prev_state; diff --git a/src/test/regress/sql/schema_based_sharding_from_workers_b.sql b/src/test/regress/sql/schema_based_sharding_from_workers_b.sql new file mode 100644 index 00000000000..ceea7ab69fd --- /dev/null +++ b/src/test/regress/sql/schema_based_sharding_from_workers_b.sql @@ -0,0 +1,1756 @@ +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); +SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); + +-- Remove the workers and add them with the groupids that we would assign at this point +-- of multi_1_schedule so when we run this test file individually, we still produce +-- the same sequence values when inserting into distributed tables using sequences from +-- workers. +SELECT 1 FROM citus_remove_node('localhost', :worker_1_port); +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); +SELECT 1 FROM citus_add_node('localhost', :worker_1_port, groupid => 33); +SELECT 1 FROM citus_add_node('localhost', :worker_2_port, groupid => 47); + +SET citus.next_shard_id TO 2090000; +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +SET client_min_messages TO WARNING; + +CREATE SCHEMA regular_schema; + +CREATE FUNCTION create_citus_local_with_data(table_name text) +RETURNS void +LANGUAGE plpgsql +AS $func$ +BEGIN + EXECUTE format(' + CREATE TABLE regular_schema.%I ( + col_1 text, + col_2 int, + col_3 bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 1000 INCREMENT BY 1000), + col_4 timestamp, + col_5 int, + col_6 bigint GENERATED ALWAYS as (col_5 * 7) stored, + col_7 numeric, + col_8 text GENERATED ALWAYS as (col_1 || ''_dummy'') stored, + col_9 bigint GENERATED ALWAYS AS IDENTITY (START WITH 100 INCREMENT BY 100) + );', table_name); + + EXECUTE format(' + INSERT INTO regular_schema.%I (col_1, col_2, col_4, col_5, col_7) + SELECT + i::text, -- col_1 + i + 42, -- col_2 + ''2026-01-01 00:00:00''::timestamp + (i || '' seconds'')::interval, -- col_4 + i * 3, -- col_5 + (i * 1.5)::numeric -- col_7 + FROM generate_series(1, 1000) AS i;', table_name); + + EXECUTE format(' + INSERT INTO regular_schema.%I (col_1, col_2, col_3, col_4, col_5, col_7) + OVERRIDING SYSTEM VALUE + SELECT + i::text, -- col_1 + i + 42, -- col_2 + 1000 + i, -- col_3 + ''2026-01-01 00:00:00''::timestamp + (i || '' seconds'')::interval, -- col_4 + i * 3, -- col_5 + (i * 1.5)::numeric -- col_7 + FROM generate_series(1001, 2000) AS i;', table_name); + + EXECUTE format(' + SELECT citus_add_local_table_to_metadata(''regular_schema.%I'');', table_name); + + EXECUTE format(' + ALTER TABLE regular_schema.%I DROP COLUMN col_2;', table_name); + + EXECUTE format(' + ALTER TABLE regular_schema.%I DROP COLUMN col_7;', table_name); + + EXECUTE format(' + ALTER TABLE regular_schema.%I ADD COLUMN col_10 bigint DEFAULT -197;', table_name); +END; +$func$; + +SELECT create_citus_local_with_data('citus_local_1'); +SELECT create_citus_local_with_data('citus_local_2'); +SELECT create_citus_local_with_data('citus_local_3'); + +SELECT create_citus_local_with_data('citus_local_4'); +SELECT create_citus_local_with_data('citus_local_5'); +SELECT create_citus_local_with_data('citus_local_6'); + +SELECT * INTO regular_schema.old_data_coordinator FROM regular_schema.citus_local_4; + +SET citus.enable_schema_based_sharding TO ON; 
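+-- [Editor's sketch, not part of the test.] With the GUC enabled, a plain
+-- CREATE SCHEMA registers the schema in pg_dist_schema and any table created
+-- in it later becomes a colocated single-shard (tenant) table, e.g.:
+--
+--   SET citus.enable_schema_based_sharding TO ON;
+--   CREATE SCHEMA tenant_x;                       -- hypothetical name
+--   SELECT COUNT(*)=1 FROM pg_dist_schema
+--   WHERE schemaid = 'tenant_x'::regnamespace;    -- true
+--
+-- tenant_4/5/6 below are created exactly this way; the GUC is then turned back
+-- off before the ALTER TABLE ... SET SCHEMA data-consistency tests.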
+ +CREATE SCHEMA tenant_4; +CREATE SCHEMA tenant_5; +CREATE SCHEMA tenant_6; + +SET citus.enable_schema_based_sharding TO OFF; + +-- Verify data consistency after moving to a distributed schema. +-- +-- Repeat this for three different schemas to test copying data +-- i) to shards on different workers and ii) to a shard on the +-- coordinator, i.e., this node. +-- +-- First, test this within a transaction block (and rollback) and then +-- outside of a transaction block. +BEGIN; + ALTER TABLE regular_schema.citus_local_4 SET SCHEMA tenant_4; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_4.citus_local_4) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_4.citus_local_4) + ); +ROLLBACK; + +ALTER TABLE regular_schema.citus_local_4 SET SCHEMA tenant_4; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_4.citus_local_4) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_4.citus_local_4) +); + +BEGIN; + ALTER TABLE regular_schema.citus_local_5 SET SCHEMA tenant_5; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_5.citus_local_5) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_5.citus_local_5) + ); +ROLLBACK; + +ALTER TABLE regular_schema.citus_local_5 SET SCHEMA tenant_5; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_5.citus_local_5) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_5.citus_local_5) +); + +BEGIN; + ALTER TABLE regular_schema.citus_local_6 SET SCHEMA tenant_6; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_6.citus_local_6) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_6.citus_local_6) + ); +ROLLBACK; + +ALTER TABLE regular_schema.citus_local_6 SET SCHEMA tenant_6; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_6.citus_local_6) EXCEPT (TABLE regular_schema.old_data_coordinator) + UNION + (TABLE regular_schema.old_data_coordinator EXCEPT TABLE tenant_6.citus_local_6) +); + +CREATE TABLE regular_schema.reference_table (id bigint PRIMARY KEY); +SELECT create_reference_table('regular_schema.reference_table'); + +CREATE TABLE regular_schema.distributed_table (id int, text_col text); +SELECT create_distributed_table('regular_schema.distributed_table', 'id'); +INSERT INTO regular_schema.distributed_table SELECT i, 'text_' || i FROM generate_series(1, 1000) AS i; + +CREATE OR REPLACE FUNCTION get_sequence_info(seq regclass) +RETURNS TABLE ( + type_name text, + min_value bigint, + max_value bigint, + start_value bigint, + last_value bigint +) +AS $func$ +DECLARE + v_last_value bigint; +BEGIN + EXECUTE format('SELECT last_value FROM %s', seq::regclass) INTO v_last_value; + + RETURN QUERY + SELECT seqtypid::regtype::text, seqmin, seqmax, seqstart, v_last_value + FROM pg_sequence + WHERE seqrelid = seq; +END; +$func$ LANGUAGE plpgsql; + +-- When creating a tenant table from workers, we always fetch the next shard id +-- and placement id from the coordinator because we never sync those sequences to +-- workers. For this reason, along this test file, we always set the next shard id +-- on the coordinator when needed, rather than setting it on the current worker node. 
+-- +-- Note that setting citus.next_shard_id on the coordinator would not work if the +-- citus internal connection we use to execute master_get_new_shardid() on the +-- coordinator changes because the underlying function, GetNextShardIdInternal(), +-- just increments NextShardId for the current session. For this reason, we instead +-- set pg_dist_shardid_seq on the coordinator in the tests where we test creating +-- distributed tables from a worker and where we want to use consistent shard ids. +-- +-- At the end of the test file, we reset pg_dist_shardid_seq. +SELECT last_value::bigint INTO pg_dist_shardid_seq_prev_state FROM pg_catalog.pg_dist_shardid_seq; + +\c - - - :worker_1_port +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2091000;$$); + +SET citus.shard_count TO 32; +SET citus.shard_replication_factor TO 1; +SET client_min_messages TO WARNING; + +SELECT * INTO regular_schema.old_data_worker FROM regular_schema.citus_local_1; + +SET citus.enable_schema_based_sharding TO ON; + +CREATE SCHEMA tenant_1; +CREATE SCHEMA tenant_2; +CREATE SCHEMA tenant_3; + +-- Verify data consistency after moving to a distributed schema. +-- +-- Repeat this for three different schemas to test copying data +-- i) to a shard on this worker, ii) to a shard on another worker, and +-- iii) to a shard on the coordinator. +-- +-- First, test this within a transaction block (and rollback) and then +-- outside of a transaction block. +BEGIN; + -- lock the table early in the transaction to make sure we don't break in that case + LOCK TABLE regular_schema.citus_local_1 IN ACCESS EXCLUSIVE MODE; + + ALTER TABLE regular_schema.citus_local_1 SET SCHEMA tenant_1; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_1.citus_local_1) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_1.citus_local_1) + ); +ROLLBACK; + +ALTER TABLE regular_schema.citus_local_1 SET SCHEMA tenant_1; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_1.citus_local_1) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_1.citus_local_1) +); + +BEGIN; + ALTER TABLE regular_schema.citus_local_2 SET SCHEMA tenant_2; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_2.citus_local_2) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_2.citus_local_2) + ); +ROLLBACK; + +ALTER TABLE regular_schema.citus_local_2 SET SCHEMA tenant_2; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_2.citus_local_2) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_2.citus_local_2) +); + +BEGIN; + ALTER TABLE regular_schema.citus_local_3 SET SCHEMA tenant_3; + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_3.citus_local_3) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_3.citus_local_3) + ); +ROLLBACK; + +ALTER TABLE regular_schema.citus_local_3 SET SCHEMA tenant_3; +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_3.citus_local_3) EXCEPT (TABLE regular_schema.old_data_worker) + UNION + (TABLE regular_schema.old_data_worker EXCEPT TABLE tenant_3.citus_local_3) +); + +CREATE TABLE regular_schema.local_table_1 ( + col_1 int, + col_2 text, + col_3 text GENERATED ALWAYS AS (col_1::text || '_gen') stored, + col_4 bigint DEFAULT 42, + col_5 bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 1 INCREMENT BY 1), + col_6 int GENERATED ALWAYS as 
(col_1 * 2) stored +); + +INSERT INTO regular_schema.local_table_1 (col_1, col_2, col_4, col_5) +OVERRIDING SYSTEM VALUE +SELECT + i, -- col_1 + 'text_' || i, -- col_2 + i * 10, -- col_4 + 100 + i -- col_5 +FROM generate_series(1, 1000) AS i; + +ALTER TABLE regular_schema.local_table_1 DROP COLUMN col_2; +ALTER TABLE regular_schema.local_table_1 DROP COLUMN col_5; + +SELECT * INTO regular_schema.old_local_table_1 FROM regular_schema.local_table_1; + +CREATE SCHEMA tenant_7; + +-- test the same using a local table on this worker node +BEGIN; + ALTER TABLE regular_schema.local_table_1 SET SCHEMA tenant_7; + + SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_7.local_table_1) EXCEPT (TABLE regular_schema.old_local_table_1) + UNION + (TABLE regular_schema.old_local_table_1 EXCEPT TABLE tenant_7.local_table_1) + ); +ROLLBACK; + +ALTER TABLE regular_schema.local_table_1 SET SCHEMA tenant_7; + +SELECT COUNT(*) = 0 FROM ( + (TABLE tenant_7.local_table_1) EXCEPT (TABLE regular_schema.old_local_table_1) + UNION + (TABLE regular_schema.old_local_table_1 EXCEPT TABLE tenant_7.local_table_1) +); + +SELECT pg_catalog.pg_table_size('tenant_7.local_table_1'::regclass) > 8192 as shell_table_has_data; +SELECT COUNT(*)=1000 FROM tenant_7.local_table_1; + +SELECT truncate_local_data_after_distributing_table('tenant_7.local_table_1'); + +SELECT pg_catalog.pg_table_size('tenant_7.local_table_1'::regclass) = 8192 as shell_table_doesnt_have_data; +SELECT COUNT(*)=1000 FROM tenant_7.local_table_1; + +CREATE SCHEMA tenant_8; + +CREATE SEQUENCE dist_seq; +CREATE TABLE tenant_8.table_1(a bigint DEFAULT nextval('dist_seq') UNIQUE, "b" text, c bigint GENERATED BY DEFAULT AS IDENTITY); +INSERT INTO tenant_8.table_1("b") VALUES ('test'); + +BEGIN; + -- add column + ALTER TABLE tenant_8.table_1 ADD COLUMN d bigint DEFAULT 2; + SELECT * FROM tenant_8.table_1 ORDER BY c; + + -- alter default, set to 3 + ALTER TABLE tenant_8.table_1 ALTER COLUMN d SET DEFAULT 3; + INSERT INTO tenant_8.table_1("b") VALUES ('test'); + SELECT * FROM tenant_8.table_1 ORDER BY c; + + -- drop default, see null + ALTER TABLE tenant_8.table_1 ALTER COLUMN d DROP DEFAULT; + INSERT INTO tenant_8.table_1("b") VALUES ('test'); + SELECT * FROM tenant_8.table_1 ORDER BY c; + + -- clean up the rows that were added to test the default behavior; the cutoff + -- ((33::bigint << 48) + 1 = 9288674231451649) is the first value that this + -- groupid-33 worker draws from dist_seq + DELETE FROM tenant_8.table_1 WHERE "b" = 'test' AND a > 9288674231451649; +COMMIT; + +-- alter column type +ALTER TABLE tenant_8.table_1 ALTER COLUMN d TYPE text; +UPDATE tenant_8.table_1 SET d = 'this is a text' WHERE d = '2'; +SELECT * FROM tenant_8.table_1 ORDER BY c; + +-- drop seq column +ALTER TABLE tenant_8.table_1 DROP COLUMN a; +SELECT * FROM tenant_8.table_1 ORDER BY c; + +-- add not null constraint +ALTER TABLE tenant_8.table_1 ALTER COLUMN b SET NOT NULL; + +-- we want to hide the error message context because the node reporting the +-- constraint violation might change from one run to another.
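+-- (With \set VERBOSITY terse, psql prints only the primary error message and +-- omits the DETAIL / CONTEXT lines that would name the node.)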
+\set VERBOSITY terse + +-- not null constraint violation, error out +INSERT INTO tenant_8.table_1 VALUES (NULL, 2, 'test'); + +\set VERBOSITY default + +-- drop not null constraint and try again +ALTER TABLE tenant_8.table_1 ALTER COLUMN b DROP NOT NULL; +INSERT INTO tenant_8.table_1 VALUES (NULL, 3, 'test'); +SELECT * FROM tenant_8.table_1 ORDER BY c; + +-- add exclusion constraint +ALTER TABLE tenant_8.table_1 ADD CONSTRAINT exc_b EXCLUDE USING btree (b with =); + +-- rename the exclusion constraint, errors out +ALTER TABLE tenant_8.table_1 RENAME CONSTRAINT exc_b TO exc_b_1; + +-- create exclusion constraint without a name +ALTER TABLE tenant_8.table_1 ADD EXCLUDE USING btree (b with =); + +INSERT INTO tenant_8.table_1 VALUES (100, 150, 'test150'); + +-- similarly, we want to hide the error message context here as well +\set VERBOSITY terse + +-- should error out due to exclusion constraint violation +INSERT INTO tenant_8.table_1 VALUES (100, 151, 'test151'); + +\set VERBOSITY default + +-- test setting autovacuum option +ALTER TABLE tenant_8.table_1 SET (autovacuum_enabled = false); + +BEGIN; + -- test multiple subcommands + ALTER TABLE tenant_8.table_1 ADD COLUMN int_column1 INTEGER, DROP COLUMN d, ADD COLUMN e bigint; + + UPDATE tenant_8.table_1 SET e = c * 10; + + -- test unique constraint without a name + ALTER TABLE tenant_8.table_1 ADD UNIQUE ("b"); + + -- test add / drop primary key + ALTER TABLE tenant_8.table_1 ADD PRIMARY KEY (c); + ALTER TABLE tenant_8.table_1 DROP CONSTRAINT table_1_pkey; + ALTER TABLE tenant_8.table_1 ADD PRIMARY KEY (e); + + SELECT * FROM tenant_8.table_1 ORDER BY c; + + -- test renaming table + ALTER TABLE tenant_8.table_1 RENAME TO table_2; + + -- test renaming column + ALTER TABLE tenant_8.table_2 RENAME COLUMN e TO f; + + -- test renaming an index + ALTER INDEX tenant_8.table_1_pkey RENAME TO table_1_pkey_renamed; +COMMIT; + +-- make sure that the shell table definition is the same on all nodes +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_8.table_2') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +CREATE SCHEMA alter_table_add_column; + +\c - - - :master_port + +CREATE SCHEMA alter_table_add_column_other_schema; + +CREATE OR REPLACE FUNCTION alter_table_add_column_other_schema.my_random(numeric) + RETURNS numeric AS +$$ +BEGIN + RETURN 7 * $1; +END; +$$ +LANGUAGE plpgsql IMMUTABLE; + +SET search_path TO alter_table_add_column; + +CREATE COLLATION caseinsensitive ( + provider = icu, + locale = 'und-u-ks-level2' +); + +CREATE TYPE "simple_!\'custom_type" AS (a integer, b integer); + +\c - - - :worker_1_port + +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2092000;$$); + +SET citus.shard_replication_factor TO 1; +SET search_path TO alter_table_add_column; +SET citus.enable_schema_based_sharding TO ON; +SET client_min_messages TO NOTICE; + +CREATE TABLE referenced (int_col integer PRIMARY KEY); +CREATE TABLE referencing (text_col text); + +-- test alter table add column with various subcommands and options +ALTER TABLE referencing ADD COLUMN test_1 integer DEFAULT (alter_table_add_column_other_schema.my_random(7) + random() + 5) NOT NULL CONSTRAINT fkey REFERENCES referenced(int_col) ON UPDATE SET DEFAULT ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; +ALTER TABLE referencing ADD COLUMN test_2 integer UNIQUE REFERENCES referenced(int_col) ON UPDATE CASCADE ON DELETE SET DEFAULT NOT DEFERRABLE
INITIALLY IMMEDIATE; + +BEGIN; + ALTER TABLE referencing ADD COLUMN test_3 integer GENERATED ALWAYS AS (test_1 * alter_table_add_column_other_schema.my_random(1)) STORED UNIQUE REFERENCES referenced(int_col) MATCH FULL; + ALTER TABLE referencing ADD COLUMN test_4 integer PRIMARY KEY WITH (fillfactor=70) NOT NULL REFERENCES referenced(int_col) MATCH SIMPLE ON UPDATE CASCADE ON DELETE SET DEFAULT; + ALTER TABLE referencing ADD COLUMN test_5 integer CONSTRAINT unique_c UNIQUE WITH (fillfactor=50) NULL; +COMMIT; + +ALTER TABLE referencing ADD COLUMN test_6 text COMPRESSION pglz COLLATE caseinsensitive NOT NULL; +ALTER TABLE referencing ADD COLUMN "test_\'!7" "simple_!\'custom_type"; + +-- we give up deparsing the ALTER TABLE command if it needs to create a check constraint, and we fall back to the legacy behavior +ALTER TABLE referencing ADD COLUMN test_8 integer CHECK (test_8 > 0); +ALTER TABLE referencing ADD COLUMN test_8 integer CONSTRAINT check_test_8 CHECK (test_8 > 0); + +-- error out properly even if the REFERENCES does not include the column list of the referenced table +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced; +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced(int_col); + +-- suppress notice messages because we want to ignore the notice about skipping adding test_6 +-- on the shard, if the shard is local +SET client_min_messages TO WARNING; + +-- try to add test_6 again, but with IF NOT EXISTS +ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 text; +ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 integer; + +SET client_min_messages TO NOTICE; + +SELECT result FROM run_command_on_all_nodes( + $$SELECT get_grouped_fkey_constraints FROM get_grouped_fkey_constraints('alter_table_add_column.referencing')$$ +) +JOIN pg_dist_node USING (nodeid) +ORDER BY result; + +SELECT result FROM run_command_on_all_nodes( + $$SELECT get_index_defs FROM get_index_defs('alter_table_add_column', 'referencing')$$ +) +JOIN pg_dist_node USING (nodeid) +ORDER BY result; + +SELECT result FROM run_command_on_all_nodes( + $$SELECT get_column_defaults FROM get_column_defaults('alter_table_add_column', 'referencing')$$ +) +JOIN pg_dist_node USING (nodeid) +ORDER BY result; + +SELECT result FROM run_command_on_all_nodes( + $$SELECT get_column_attrs FROM get_column_attrs('alter_table_add_column.referencing')$$ +) +JOIN pg_dist_node USING (nodeid) +ORDER BY result; + +CREATE TABLE tenant_8.table_3 (a int, b text); +INSERT INTO tenant_8.table_3 SELECT i, 'text_' || i FROM generate_series(1, 100) AS i; + +-- test truncate +TRUNCATE tenant_8.table_3; +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=0 FROM tenant_8.table_3 +$$); + +BEGIN; + CREATE SCHEMA tenant_9; + + CREATE SEQUENCE tenant_9.seq_1 START 5000 INCREMENT 5; + + CREATE USER tenant_9_owner; + CREATE TABLE tenant_9.table_1 ( + a bigint NULL DEFAULT 100, + b text COLLATE "C" DEFAULT now()::text, + c int DEFAULT nextval('tenant_9.seq_1'::regclass), + d bigint GENERATED BY DEFAULT AS IDENTITY + ( + MINVALUE 5 + MAXVALUE 100 + START WITH 10 + ), + e int NOT NULL REFERENCES regular_schema.reference_table(id) MATCH FULL ON UPDATE RESTRICT ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + f int GENERATED ALWAYS AS (c * 2) STORED, + CONSTRAINT table_1_pkey PRIMARY KEY (a, b), + CONSTRAINT table_1_unique_b UNIQUE NULLS DISTINCT (b, a), + CONSTRAINT table_1_check_a_positive CHECK (a > 0) + ) + PARTITION BY RANGE (a); +COMMIT; + +SELECT result FROM
run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_1') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +CREATE TABLE tenant_9.table_2 ( + a serial, + b bigserial +) +WITH (autovacuum_enabled = false, fillfactor = 20); + +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_2') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +CREATE UNLOGGED TABLE tenant_9.table_3 ( + a int, + b text STORAGE EXTERNAL COMPRESSION pglz, + c int generated always as (a * 2) stored, + d int generated always as (a * 3) stored, + e text CONSTRAINT table_3_e_check CHECK (length(e) < 25) +); + +\c - - - :master_port + +CREATE UNIQUE INDEX new_index ON tenant_9.table_3 USING btree (a); + +SELECT result FROM run_command_on_all_nodes($Q$ + SET citus.enable_ddl_propagation TO off; + CREATE FUNCTION fake_am_handler(internal) + RETURNS table_am_handler + AS 'citus' + LANGUAGE C; + CREATE ACCESS METHOD fake_am TYPE TABLE HANDLER fake_am_handler; + SET citus.enable_ddl_propagation TO on; +$Q$); + +-- Since Citus assumes that access methods are part of the extension, manually +-- mark fake_am as owned by the extension to be able to pass Citus' checks while +-- distributing tables. +SET client_min_messages TO WARNING; +ALTER EXTENSION citus ADD ACCESS METHOD fake_am; +SET client_min_messages TO NOTICE; + +CREATE ROLE test_non_super_user; +ALTER ROLE test_non_super_user NOSUPERUSER; + +CREATE ROLE rls_test_user_1 WITH LOGIN; +ALTER ROLE rls_test_user_1 NOSUPERUSER; + +CREATE ROLE rls_test_user_2 WITH LOGIN; +ALTER ROLE rls_test_user_2 NOSUPERUSER; + +CREATE TEXT SEARCH CONFIGURATION regular_schema.text_search_cfg (parser = default); + +GRANT USAGE ON SCHEMA tenant_9 TO rls_test_user_1, rls_test_user_2; + +\c - - - :worker_1_port + +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2093000;$$); + +SET citus.shard_replication_factor TO 1; +SET search_path TO alter_table_add_column; +SET citus.enable_schema_based_sharding TO ON; +SET client_min_messages TO NOTICE; + +ALTER TABLE tenant_9.table_3 SET LOGGED; +ALTER TABLE tenant_9.table_3 ALTER COLUMN b SET DATA TYPE varchar(100) USING b::varchar(100); +ALTER TABLE tenant_9.table_3 ALTER COLUMN e SET COMPRESSION pglz; +ALTER TABLE tenant_9.table_3 ADD UNIQUE USING INDEX new_index; +ALTER TABLE tenant_9.table_3 VALIDATE CONSTRAINT table_3_e_check; +ALTER TABLE tenant_9.table_3 ALTER COLUMN a SET NOT NULL; +ALTER TABLE tenant_9.table_3 REPLICA IDENTITY USING INDEX new_index; +CLUSTER tenant_9.table_3 USING new_index; + +-- not supported, but let's keep these as negative tests for future coverage +ALTER TABLE tenant_9.table_3 ALTER COLUMN d DROP EXPRESSION; +ALTER TABLE tenant_9.table_3 ALTER COLUMN c SET GENERATED BY DEFAULT RESTART WITH 500; +ALTER TABLE tenant_9.table_3 ALTER COLUMN c DROP IDENTITY IF EXISTS; +ALTER TABLE tenant_9.table_3 ALTER COLUMN a SET STATISTICS 50; +ALTER TABLE tenant_9.table_3 ALTER COLUMN b SET STORAGE RESET; +ALTER TABLE tenant_9.table_3 ALTER COLUMN b SET STORAGE MAIN; +ALTER TABLE tenant_9.table_3 CLUSTER ON new_index; + +CREATE TABLE tenant_9.table_4 (a int, b text); + +ALTER TABLE tenant_9.table_4 SET UNLOGGED; +ALTER TABLE tenant_9.table_4 SET ACCESS METHOD fake_am; +ALTER TABLE tenant_9.table_4 OWNER TO test_non_super_user; + +SET client_min_messages TO ERROR; +CREATE TABLE tenant_9.table_5 (a int, b text) USING fake_am; +SET
client_min_messages TO NOTICE; + +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA regular_schema_worker_1; +SET citus.enable_schema_based_sharding TO ON; + +CREATE TABLE regular_schema_worker_1.local_table_1 ( + a int, + b text +); + +CREATE STATISTICS ON a, b FROM regular_schema_worker_1.local_table_1; + +CREATE INDEX text_search_idx ON regular_schema_worker_1.local_table_1 +USING gin (to_tsvector('regular_schema.text_search_cfg'::regconfig, (COALESCE(b, ''::character varying))::text)); + +ALTER TABLE regular_schema_worker_1.local_table_1 SET SCHEMA tenant_9; +ALTER TABLE tenant_9.local_table_1 RENAME TO table_6; + +-- we don't support this yet but let's still keep it +ALTER TABLE tenant_9.table_6 ALTER COLUMN 2 SET STATISTICS 101; + +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA regular_schema_worker_2; +SET citus.enable_schema_based_sharding TO ON; + +CREATE TABLE regular_schema_worker_2.local_table_2 (a int, tenant_id int); +INSERT INTO regular_schema_worker_2.local_table_2 SELECT i, 1 FROM generate_series(1, 5) AS i; +INSERT INTO regular_schema_worker_2.local_table_2 SELECT i, 2 FROM generate_series(6, 10) AS i; +CREATE POLICY local_table_2_select_policy ON regular_schema_worker_2.local_table_2 FOR SELECT TO rls_test_user_1, rls_test_user_2 USING (current_user = 'rls_test_user_' || tenant_id::text); +GRANT SELECT ON TABLE regular_schema_worker_2.local_table_2 TO rls_test_user_1, rls_test_user_2; + +ALTER TABLE regular_schema_worker_2.local_table_2 SET SCHEMA tenant_9; +ALTER TABLE tenant_9.local_table_2 RENAME TO table_7; + +SET ROLE rls_test_user_1; +SELECT COUNT(*)=10 FROM tenant_9.table_7; + +SET ROLE rls_test_user_2; +SELECT COUNT(*)=10 FROM tenant_9.table_7; + +SET ROLE postgres; + +ALTER TABLE tenant_9.table_7 ENABLE ROW LEVEL SECURITY; + +SET ROLE rls_test_user_1; +SELECT COUNT(*)=5 FROM tenant_9.table_7; + +SET ROLE rls_test_user_2; +SELECT COUNT(*)=5 FROM tenant_9.table_7; + +SET ROLE postgres; + +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA regular_schema_worker_3; +SET citus.enable_schema_based_sharding TO ON; + +CREATE TABLE regular_schema_worker_3.local_table_3 (value int, tenant_id int); + +\c - - - :master_port + +CREATE FUNCTION regular_schema.local_table_3_increment_value_tf() RETURNS trigger AS $local_table_3_increment_value_tf$ +BEGIN + UPDATE tenant_9.table_8 SET value=value+1; + RETURN NEW; +END; +$local_table_3_increment_value_tf$ LANGUAGE plpgsql; + +CREATE FUNCTION regular_schema.local_table_3_notice_value_tf() RETURNS trigger AS $local_table_3_notice_value_tf$ +BEGIN + RAISE NOTICE 'New value is %', NEW.value; + RETURN NEW; +END; +$local_table_3_notice_value_tf$ LANGUAGE plpgsql; + +\c - - - :worker_1_port + +SELECT result FROM run_command_on_coordinator($$ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2093500;$$); + +SET citus.shard_replication_factor TO 1; +SET search_path TO alter_table_add_column; +SET citus.enable_schema_based_sharding TO ON; +SET client_min_messages TO NOTICE; + +CREATE TRIGGER local_table_3_insert_statement_trigger +AFTER INSERT ON regular_schema_worker_3.local_table_3 +FOR EACH STATEMENT EXECUTE FUNCTION regular_schema.local_table_3_increment_value_tf(); + +-- Disable citus.enable_unsafe_triggers to make sure that we allow triggers on +-- distributed-schema tables regardless of this setting, as we don't consider +-- triggers unsafe on such tables.
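+-- (citus.enable_unsafe_triggers is the setting that normally gates triggers on +-- regular distributed tables, so turning it off here is the stricter configuration.)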
+SET citus.enable_unsafe_triggers TO OFF; + +ALTER TABLE regular_schema_worker_3.local_table_3 SET SCHEMA tenant_9; +ALTER TABLE tenant_9.local_table_3 RENAME TO table_8; + +INSERT INTO tenant_9.table_8 VALUES (1), (1); + +-- Show that the trigger is executed exactly once: we should see two "2"s. +-- Seeing "1"s would mean the trigger didn't fire at all, and seeing "3"s would +-- mean it fired more than once. +SELECT * FROM tenant_9.table_8; + +CREATE TRIGGER local_table_3_update_row_trigger +AFTER UPDATE ON tenant_9.table_8 +FOR EACH ROW EXECUTE FUNCTION regular_schema.local_table_3_notice_value_tf(); + +-- we want to hide the error message context because the node sending +-- the notice might change from one run to another. +\set VERBOSITY terse +UPDATE tenant_9.table_8 SET value=0; +\set VERBOSITY default + +ALTER TABLE tenant_9.table_8 DISABLE TRIGGER local_table_3_update_row_trigger; + +-- no notice should be raised +UPDATE tenant_9.table_8 SET value=0; + +ALTER TABLE tenant_9.table_8 DISABLE TRIGGER ALL; + +INSERT INTO tenant_9.table_8 VALUES (1), (1); + +SELECT * FROM tenant_9.table_8 ORDER BY value; + +ALTER TABLE tenant_9.table_8 ENABLE TRIGGER ALL; +ALTER TABLE tenant_9.table_8 DISABLE TRIGGER local_table_3_insert_statement_trigger; + +TRUNCATE tenant_9.table_8; + +INSERT INTO tenant_9.table_8 VALUES (2), (2); + +-- we want to hide the error message context because the node sending +-- the notice might change from one run to another. +\set VERBOSITY terse +UPDATE tenant_9.table_8 SET value=5; +\set VERBOSITY default + +SELECT * FROM tenant_9.table_8 ORDER BY value; + +ALTER TRIGGER local_table_3_insert_statement_trigger ON tenant_9.table_8 RENAME TO local_table_3_insert_statement_trigger_renamed; + +CREATE TRIGGER trigger_to_drop +AFTER UPDATE ON tenant_9.table_8 +FOR EACH ROW EXECUTE FUNCTION regular_schema.local_table_3_notice_value_tf(); + +DROP TRIGGER trigger_to_drop ON tenant_9.table_8; + +-- not supported at all +ALTER TRIGGER local_table_3_insert_statement_trigger_renamed ON tenant_9.table_8 DEPENDS ON EXTENSION citus; + +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA regular_schema_worker_4; +SET citus.enable_schema_based_sharding TO ON; + +BEGIN; + -- Early in the transaction, force parallelization and make sure to use + -- remote connections even while accessing a local shard.
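+ -- (citus.force_max_query_parallelization opens a connection per shard, and with + -- citus.enable_local_execution off, even local shard placements are accessed + -- over those connections rather than directly.)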
+ SET citus.force_max_query_parallelization TO ON; + SET citus.enable_local_execution TO OFF; + + SELECT SUM(id) FROM regular_schema.distributed_table; + + -- restore settings back + SET citus.force_max_query_parallelization TO OFF; + SET citus.enable_local_execution TO ON; + + CREATE TABLE regular_schema_worker_4.local_table_4 (a int, b text, c int); + CREATE INDEX index_with_name_1 ON regular_schema_worker_4.local_table_4 USING btree (a) WITH (fillfactor = 90); + CREATE INDEX index_with_name_2 ON regular_schema_worker_4.local_table_4 USING btree (b); + + ALTER TABLE regular_schema_worker_4.local_table_4 SET SCHEMA tenant_9; + ALTER TABLE tenant_9.local_table_4 RENAME TO table_9; + + DROP INDEX tenant_9.index_with_name_2; + + CREATE INDEX index_with_name_3 ON tenant_9.table_9 USING btree (b) WITH (fillfactor = 99); + CREATE INDEX ON tenant_9.table_9 USING btree (c); +COMMIT; + +CREATE INDEX CONCURRENTLY ON tenant_9.table_9 USING btree (a, b); + +REINDEX TABLE tenant_9.table_9; +REINDEX INDEX tenant_9.index_with_name_1; +REINDEX INDEX CONCURRENTLY tenant_9.index_with_name_3; + +CREATE INDEX index_with_name_4 ON tenant_9.table_9 USING btree (a DESC); +DROP INDEX CONCURRENTLY tenant_9.index_with_name_4; + +CREATE INDEX index_with_name_5 ON tenant_9.table_9 USING btree ((a + b::int) DESC) WITH (fillfactor = 60); + +ALTER INDEX tenant_9.index_with_name_1 RENAME TO index_with_name_1_renamed; + +ALTER INDEX tenant_9.index_with_name_3 RESET (fillfactor); +ALTER INDEX tenant_9.index_with_name_5 SET (fillfactor = 80); + +ALTER INDEX tenant_9.index_with_name_5 ALTER COLUMN 1 SET STATISTICS 4646; + +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA regular_schema_worker_5; +SET citus.enable_schema_based_sharding TO ON; + +CREATE TABLE regular_schema_worker_5.local_table_5 (a int, b text, c int); +CREATE TABLE regular_schema_worker_5.local_table_6 (a int, b text, c int); + +CREATE VIEW regular_schema_worker_5.table_10_view_1 AS +SELECT a FROM regular_schema_worker_5.local_table_5 +UNION +SELECT a FROM regular_schema_worker_5.local_table_6; + +ALTER TABLE regular_schema_worker_5.local_table_5 SET SCHEMA tenant_9; +ALTER TABLE regular_schema_worker_5.local_table_6 SET SCHEMA tenant_9; + +ALTER TABLE tenant_9.local_table_5 RENAME TO table_10; +ALTER TABLE tenant_9.local_table_6 RENAME TO table_11; + +CREATE VIEW tenant_9.table_10_view_2 AS SELECT b FROM tenant_9.table_10; +CREATE VIEW tenant_9.table_10_view_3 AS SELECT c FROM tenant_9.table_11; + +DROP VIEW tenant_9.table_10_view_2; + +ALTER VIEW tenant_9.table_10_view_3 RENAME TO table_10_view_3_renamed; + +ALTER VIEW regular_schema_worker_5.table_10_view_1 SET SCHEMA tenant_9; + +ALTER VIEW tenant_9.table_10_view_1 OWNER TO test_non_super_user; + +-- check distributed views on all nodes +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT jsonb_agg( + jsonb_build_object ( + 'schemaname', n.nspname, + 'viewname', c.relname + ) + ORDER BY n.nspname, c.relname +) +FROM pg_class c +LEFT JOIN pg_namespace n ON n.oid = c.relnamespace +WHERE c.relkind = 'v' AND n.nspname IN ('regular_schema_worker_5', 'tenant_9') AND c.relname LIKE 'table_10_view_%'; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- check the owner of tenant_9.table_10_view_1 on all nodes +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT pg_get_userbyid(c.relowner) +FROM pg_class c +LEFT JOIN pg_namespace n ON n.oid = c.relnamespace +WHERE c.relkind = 'v' AND n.nspname = 'tenant_9' AND c.relname = 'table_10_view_1'; +$$ +) JOIN pg_dist_node USING 
(nodeid) ORDER BY nodeport; + +-- make sure that the shell table definition is the same on all nodes +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_3') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_4') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_5') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_6') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT replace( + string_agg(ddl_events, '; '), + -- to avoid adding another test output for PG < 17, replace this with an empty string + ' GRANT MAINTAIN ON tenant_9.table_7 TO postgres;', + '' +) FROM master_get_table_ddl_events('tenant_9.table_7') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_8') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('tenant_9.table_9') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- Considering the various ways a table can use sequences, test whether we +-- properly adjust sequence min / max values on all worker nodes, +-- including the current one, and whether we sync last_value +-- to the coordinator.
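+-- +-- For reference, Citus gives each node a disjoint range of a distributed bigint +-- sequence by deriving the range start from the node's group id shifted into the +-- high bits (e.g., 33::bigint << 48 = 9288674231451648 on the groupid-33 +-- worker), while int / smallint sequences cannot hold such offsets; that is what +-- the worker-side failures tested below come down to.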
+ +SET citus.enable_schema_based_sharding TO OFF; +CREATE SCHEMA initially_local_schema_seq_test_with_initial_data; + +SET citus.enable_schema_based_sharding TO ON; + +-- schema to move tables under initially_local_schema_seq_test_with_initial_data into +CREATE SCHEMA dist_schema_seq_test_with_initial_data; + +CREATE SCHEMA dist_schema_seq_test_without_initial_data; + +-- create sequences and a table under initially_local_schema_seq_test_with_initial_data, and move the table to dist_schema_seq_test_with_initial_data + +CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.bigint_col_bigint_sequence AS bigint; +CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.bigint_col_int_sequence AS int; +CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.int_col_bigint_sequence AS bigint; +CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.int_col_int_sequence AS int; +CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence AS smallint; + +-- also create some sequences with custom settings +CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence AS bigint MINVALUE 1000 MAXVALUE 1000000 START WITH 5000 INCREMENT 100; +CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence AS int MINVALUE 100 MAXVALUE 2000000 START WITH 1000 INCREMENT 5; +CREATE SEQUENCE initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence AS smallint MINVALUE 10 MAXVALUE 1000 START WITH 50 INCREMENT 15; + +CREATE TABLE initially_local_schema_seq_test_with_initial_data.nextval_test ( + id int, + column_to_drop_8 text, + bigint_col_with_bigint_sequence bigint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.bigint_col_bigint_sequence'::regclass), + column_to_drop_1 text, + bigint_col_with_int_sequence bigint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.bigint_col_int_sequence'::regclass), + column_to_drop_6 text, + int_col_with_bigint_sequence int DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.int_col_bigint_sequence'::regclass), + column_to_drop_2 text, + column_to_drop_3 text, + column_to_drop_5 text, + int_col_with_int_sequence int DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.int_col_int_sequence'::regclass), + column_to_drop_4 text, + column_to_drop_7 text, + smallint_col_with_smallint_sequence smallint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence'::regclass), + bigint_col_with_custom_bigint_sequence bigint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence'::regclass), + int_col_with_custom_int_sequence int DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence'::regclass), + smallint_col_with_custom_smallint_sequence smallint DEFAULT nextval('initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence'::regclass) +); + +-- Mark some of the sequences as owned by the columns. +-- Note that marking a sequence as owned by a table column causes the sequence +-- to be moved to the same schema as the table automatically when the table is +-- moved to another schema.
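+-- Also note that an owned sequence is dropped automatically together with its +-- owning column or table, which the sequence-existence check after the +-- DROP TABLE further below relies on.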
+ALTER SEQUENCE initially_local_schema_seq_test_with_initial_data.bigint_col_bigint_sequence OWNED BY initially_local_schema_seq_test_with_initial_data.nextval_test.bigint_col_with_bigint_sequence; +ALTER SEQUENCE initially_local_schema_seq_test_with_initial_data.bigint_col_int_sequence OWNED BY initially_local_schema_seq_test_with_initial_data.nextval_test.bigint_col_with_int_sequence; +ALTER SEQUENCE initially_local_schema_seq_test_with_initial_data.int_col_bigint_sequence OWNED BY initially_local_schema_seq_test_with_initial_data.nextval_test.int_col_with_bigint_sequence; + +INSERT INTO initially_local_schema_seq_test_with_initial_data.nextval_test (id) SELECT i FROM generate_series(1, 5) AS i; + +ALTER TABLE initially_local_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_1; +ALTER TABLE initially_local_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_2; +ALTER TABLE initially_local_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_3; + +ALTER TABLE initially_local_schema_seq_test_with_initial_data.nextval_test SET SCHEMA dist_schema_seq_test_with_initial_data; + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_4; + +CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_int_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence AS smallint; + +-- also create some sequences with custom settings +CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence AS bigint MINVALUE 1050 MAXVALUE 1000050 START WITH 5050 INCREMENT 150; +CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence AS int MINVALUE 150 MAXVALUE 2000050 START WITH 1000 INCREMENT 55; +CREATE SEQUENCE dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence AS smallint MINVALUE 60 MAXVALUE 1050 START WITH 100 INCREMENT 65; + +-- these all fail because the table is not empty +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_bigint_sequence int DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_int_sequence int DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_smallint_col_with_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence'::regclass); + +-- so let's add the columns first, then alter their column default expressions +ALTER TABLE
dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_bigint_sequence bigint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_bigint_col_with_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_5; + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_int_sequence bigint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_bigint_col_with_int_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_6; + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_bigint_sequence int; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_int_col_with_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_7; + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_int_sequence int; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_int_col_with_int_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_int_col_int_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_8; + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN column_to_drop_9 text; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test DROP COLUMN column_to_drop_9; + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_smallint_col_with_smallint_sequence smallint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_smallint_col_with_smallint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_bigint_col_with_custom_bigint_sequence bigint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_bigint_col_with_custom_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_int_col_with_custom_int_sequence int; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_int_col_with_custom_int_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ADD COLUMN added_smallint_col_with_custom_smallint_sequence smallint; +ALTER TABLE dist_schema_seq_test_with_initial_data.nextval_test ALTER COLUMN added_smallint_col_with_custom_smallint_sequence SET DEFAULT nextval('dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence'::regclass); + +-- Check nextval sequences. 
+-- bigint_col_int_sequence and added_bigint_col_int_sequence should become bigint sequences, see EnsureDistributedSequencesHaveOneType() +SELECT result FROM run_command_on_all_nodes( +$$ +WITH sequence_info AS ( + SELECT name::regclass::text, (get_sequence_info(name::regclass)).* + FROM UNNEST(ARRAY[ + 'dist_schema_seq_test_with_initial_data.bigint_col_bigint_sequence', + 'dist_schema_seq_test_with_initial_data.bigint_col_int_sequence', + 'dist_schema_seq_test_with_initial_data.int_col_bigint_sequence', + 'initially_local_schema_seq_test_with_initial_data.int_col_int_sequence', + 'initially_local_schema_seq_test_with_initial_data.smallint_col_smallint_sequence', + 'dist_schema_seq_test_with_initial_data.added_bigint_col_bigint_sequence', + 'dist_schema_seq_test_with_initial_data.added_bigint_col_int_sequence', + 'dist_schema_seq_test_with_initial_data.added_int_col_bigint_sequence', + 'dist_schema_seq_test_with_initial_data.added_int_col_int_sequence', + 'dist_schema_seq_test_with_initial_data.added_smallint_col_smallint_sequence', + 'initially_local_schema_seq_test_with_initial_data.custom_bigint_col_bigint_sequence', + 'initially_local_schema_seq_test_with_initial_data.custom_int_col_int_sequence', + 'initially_local_schema_seq_test_with_initial_data.custom_smallint_col_smallint_sequence', + 'dist_schema_seq_test_with_initial_data.added_custom_bigint_col_bigint_sequence', + 'dist_schema_seq_test_with_initial_data.added_custom_int_col_int_sequence', + 'dist_schema_seq_test_with_initial_data.added_custom_smallint_col_smallint_sequence' + ]) AS qualified_sequence_name(name) +) +SELECT jsonb_agg( + jsonb_build_object( + 'name', name, + 'type_name', type_name, + 'start_value', start_value, + 'last_value', last_value, + 'min_value', min_value, + 'max_value', max_value + ) + ORDER BY name +) +FROM sequence_info; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- check nextval calls used in table definition +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('dist_schema_seq_test_with_initial_data.nextval_test') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- Should succeed on all nodes as we don't try inserting column default values +-- for the columns that are using int / smallint based sequences. +-- Doing so is okay from the coordinator but would cause an error on workers. 
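+-- (Passing parallel => false makes run_command_on_all_nodes run the command on +-- one node at a time, which should keep the order in which the nodes draw +-- sequence values deterministic across runs.)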
+SELECT result FROM run_command_on_all_nodes( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (10, DEFAULT, DEFAULT, 1, 1, 1, DEFAULT, 1, 1, DEFAULT, DEFAULT, 1, 1, 1, DEFAULT, 1, 1) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$, + parallel => false +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- succeeds on the coordinator +SELECT result FROM run_command_on_coordinator( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (11, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$ +); + +-- all fail on workers +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)$$, parallel => false); +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)$$, parallel => false); +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.nextval_test VALUES (1, 1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)$$, parallel => false); + +SELECT * FROM dist_schema_seq_test_with_initial_data.nextval_test ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17; + +DROP TABLE dist_schema_seq_test_with_initial_data.nextval_test; + +-- After dropping the table, make sure that only the sequences for which +-- we didn't execute "alter sequence ... owned by column" are left. +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT array_agg(sequencename ORDER BY sequencename) AS sequences +FROM pg_sequences +WHERE + schemaname IN ( + 'initially_local_schema_seq_test_with_initial_data', + 'dist_schema_seq_test_with_initial_data' + ) AND + sequencename IN ( + 'bigint_col_bigint_sequence', + 'bigint_col_int_sequence', + 'int_col_bigint_sequence', + 'int_col_int_sequence', + 'smallint_col_smallint_sequence', + 'added_bigint_col_bigint_sequence', + 'added_bigint_col_int_sequence', + 'added_int_col_bigint_sequence', + 'added_int_col_int_sequence', + 'added_smallint_col_smallint_sequence', + 'custom_bigint_col_bigint_sequence', + 'custom_int_col_int_sequence', + 'custom_smallint_col_smallint_sequence', + 'added_custom_bigint_col_bigint_sequence', + 'added_custom_int_col_int_sequence', + 'added_custom_smallint_col_smallint_sequence' + ); +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- create a table with built-in sequences under initially_local_schema_seq_test_with_initial_data, and move the table to dist_schema_seq_test_with_initial_data + +CREATE TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test ( + id int, + column_to_drop_1 text, + smallserial_col smallserial, + column_to_drop_2 text, + column_to_drop_3 text, + serial_col serial, + column_to_drop_4 text, + bigserial_col bigserial, + column_to_drop_5 text, + generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY, + column_to_drop_6 text, + column_to_drop_7 text, + generated_int_col int GENERATED BY DEFAULT AS IDENTITY, + generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 178 INCREMENT BY 17 MINVALUE 100 MAXVALUE 1500600) +); + +INSERT INTO initially_local_schema_seq_test_with_initial_data.built_in_seq_test (id) SELECT i FROM generate_series(1, 5) AS i; + +ALTER TABLE 
initially_local_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_1; +ALTER TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_2; +ALTER TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_4; +ALTER TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_6; + +ALTER TABLE initially_local_schema_seq_test_with_initial_data.built_in_seq_test SET SCHEMA dist_schema_seq_test_with_initial_data; + +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_3; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_5; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN column_to_drop_7; + +-- these all fail: we cannot add serial-based columns, nor identity columns +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_smallserial_col smallserial; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_serial_col serial; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_bigserial_col bigserial; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_int_col int GENERATED BY DEFAULT AS IDENTITY; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY; + +-- We also cannot add a column and just alter its type to a serial-based one later. +-- Postgres normally allows turning an existing column into an identity column +-- later, but we don't support that either. +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_int_col int GENERATED BY DEFAULT AS IDENTITY; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY; + +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_smallint_col smallint; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ALTER COLUMN added_generated_smallint_col ADD GENERATED BY DEFAULT AS IDENTITY; + +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_int_col int; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ALTER COLUMN added_generated_int_col ADD GENERATED BY DEFAULT AS IDENTITY; + +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ADD COLUMN added_generated_bigint_col bigint; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test ALTER COLUMN added_generated_bigint_col ADD GENERATED BY DEFAULT AS IDENTITY; + +-- let's drop them too as they're not useful now +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN added_generated_smallint_col; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN added_generated_int_col; +ALTER TABLE dist_schema_seq_test_with_initial_data.built_in_seq_test DROP COLUMN added_generated_bigint_col; + +-- check built-in sequences
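+-- (Both serial and identity columns record their backing sequence as a pg_depend +-- entry pointing back at the table, which is what the pg_depend join below +-- enumerates.)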
+SELECT result FROM run_command_on_all_nodes( +$$ +WITH sequence_info AS ( + SELECT oid::regclass::text AS name, (get_sequence_info(oid)).* + FROM ( + SELECT objid + FROM pg_depend d + JOIN pg_class c ON c.oid = d.objid + WHERE d.refobjid = 'dist_schema_seq_test_with_initial_data.built_in_seq_test'::regclass AND + c.relkind = 'S' + ) sequence(oid) +) +SELECT jsonb_agg( + jsonb_build_object( + 'name', name, + 'type_name', type_name, + 'start_value', start_value, + 'last_value', last_value, + 'min_value', min_value, + 'max_value', max_value + ) + ORDER BY name +) +FROM sequence_info; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- check nextval calls used in table definition +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('dist_schema_seq_test_with_initial_data.built_in_seq_test') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- Should succeed on all nodes as we don't try inserting column default values +-- for the columns that are using int / smallint based sequences. +-- Doing so is okay from the coordinator but would cause an error on workers. +SELECT result FROM run_command_on_all_nodes( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (10, 1, 1, 1, 1, 1, DEFAULT) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$, + parallel => false +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- succeeds on the coordinator +SELECT result FROM run_command_on_coordinator( + $$ + WITH ins AS ( + INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (11, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING * + ) + SELECT to_jsonb(ins) FROM ins; + $$ +); + +-- all fail on workers +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (1, DEFAULT, 1, 1, 1, 1, 1)$$, parallel => false); +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (1, 1, DEFAULT, 1, 1, 1, 1)$$, parallel => false); +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (1, 1, 1, 1, DEFAULT, 1, 1)$$, parallel => false); +SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_with_initial_data.built_in_seq_test VALUES (1, 1, 1, 1, 1, DEFAULT, 1)$$, parallel => false); + +SELECT * FROM dist_schema_seq_test_with_initial_data.built_in_seq_test ORDER BY 1, 2, 3, 4, 5, 6, 7; + +-- create sequences and a table under dist_schema_seq_test_without_initial_data + +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence AS bigint MINVALUE 1000 MAXVALUE 1000000 START WITH 5000 INCREMENT 100; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.bigint_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.int_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.int_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence AS smallint; + +CREATE TABLE dist_schema_seq_test_without_initial_data.nextval_test ( + id int, + bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence'::regclass), + bigint_col_with_int_sequence bigint DEFAULT 
nextval('dist_schema_seq_test_without_initial_data.bigint_col_int_sequence'::regclass), + int_col_with_bigint_sequence int DEFAULT nextval('dist_schema_seq_test_without_initial_data.int_col_bigint_sequence'::regclass), + int_col_with_int_sequence int DEFAULT nextval('dist_schema_seq_test_without_initial_data.int_col_int_sequence'::regclass), + smallint_col_with_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence'::regclass) +); + +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_int_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence AS smallint; + +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_bigint_col_with_bigint_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_bigint_col_with_int_sequence bigint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_int_col_with_bigint_sequence int DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_int_col_with_int_sequence int DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_int_col_int_sequence'::regclass); +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_smallint_col_with_smallint_sequence smallint DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence'::regclass); + +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence AS bigint; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence AS int; +CREATE SEQUENCE dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence AS smallint; + +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_bigint_col_with_bigint_sequence bigint; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_bigint_col_with_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_bigint_col_with_int_sequence bigint; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_bigint_col_with_int_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_int_col_with_bigint_sequence int; 
+ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_int_col_with_bigint_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_int_col_with_int_sequence int; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_int_col_with_int_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence'::regclass); + +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ADD COLUMN added_altered_smallint_col_with_smallint_sequence smallint; +ALTER TABLE dist_schema_seq_test_without_initial_data.nextval_test ALTER COLUMN added_altered_smallint_col_with_smallint_sequence SET DEFAULT nextval('dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence'::regclass); + +-- Check nextval sequences. +-- bigint_col_int_sequence, added_bigint_col_int_sequence and added_altered_bigint_col_int_sequence should become bigint sequences, see EnsureDistributedSequencesHaveOneType() +SELECT result FROM run_command_on_all_nodes( +$$ +WITH sequence_info AS ( + SELECT name::regclass::text, (get_sequence_info(name::regclass)).* + FROM UNNEST(ARRAY[ + 'dist_schema_seq_test_without_initial_data.bigint_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.bigint_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.int_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.int_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.smallint_col_smallint_sequence', + 'dist_schema_seq_test_without_initial_data.added_bigint_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.added_bigint_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.added_int_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.added_int_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.added_smallint_col_smallint_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_bigint_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_bigint_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_int_col_bigint_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_int_col_int_sequence', + 'dist_schema_seq_test_without_initial_data.added_altered_smallint_col_smallint_sequence' + ]) AS qualified_sequence_name(name) +) +SELECT jsonb_agg( + jsonb_build_object( + 'name', name, + 'type_name', type_name, + 'start_value', start_value, + 'last_value', last_value, + 'min_value', min_value, + 'max_value', max_value + ) + ORDER BY name +) +FROM sequence_info; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- check nextval calls used in table definition +SELECT result FROM run_command_on_all_nodes( +$$ +SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('dist_schema_seq_test_without_initial_data.nextval_test') AS ddl_events; +$$ +) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport; + +-- Should succeed on all nodes as we don't try inserting column default values +-- for the columns that are using int / smallint based sequences. +-- Doing so is okay from the coordinator but would cause an error on workers. 
+SELECT result FROM run_command_on_all_nodes(
+    $$
+    WITH ins AS (
+        INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (10, DEFAULT, DEFAULT, 1, 1, 1, DEFAULT, DEFAULT, 1, 1, 1, DEFAULT, DEFAULT, 1, 1, 1) RETURNING *
+    )
+    SELECT to_jsonb(ins) FROM ins;
+    $$,
+    parallel => false
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+
+-- will fail on workers but should still succeed on the coordinator
+SELECT result FROM run_command_on_all_nodes(
+    $$
+    WITH ins AS (
+        INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (11, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING *
+    )
+    SELECT to_jsonb(ins) FROM ins;
+    $$,
+    parallel => false
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+
+-- all fail on workers, specifically test int / smallint columns added later
+SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1)$$, parallel => false);
+SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1)$$, parallel => false);
+SELECT result FROM run_command_on_workers($$INSERT INTO dist_schema_seq_test_without_initial_data.nextval_test VALUES (1, 1, 1, 1, 1, 1, 1, 1, DEFAULT, 1, 1, 1, 1, 1, 1, 1)$$, parallel => false);
+
+SELECT * FROM dist_schema_seq_test_without_initial_data.nextval_test ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16;
+
+-- create a table with built-in sequences under dist_schema_seq_test_without_initial_data
+
+CREATE TABLE dist_schema_seq_test_without_initial_data.built_in_seq_test (
+    id int,
+    smallserial_col smallserial,
+    serial_col serial,
+    bigserial_col bigserial,
+    generated_smallint_col smallint GENERATED BY DEFAULT AS IDENTITY,
+    generated_int_col int GENERATED BY DEFAULT AS IDENTITY,
+    generated_bigint_col bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 178 INCREMENT BY 17 MINVALUE 100 MAXVALUE 1500600)
+);
+
+-- check built-in sequences
+SELECT result FROM run_command_on_all_nodes(
+$$
+WITH sequence_info AS (
+    SELECT oid::regclass::text AS name, (get_sequence_info(oid)).*
+    FROM (
+        SELECT objid
+        FROM pg_depend d
+        JOIN pg_class c ON c.oid = d.objid
+        WHERE d.refobjid = 'dist_schema_seq_test_without_initial_data.built_in_seq_test'::regclass AND
+              c.relkind = 'S'
+    ) sequence(oid)
+)
+SELECT jsonb_agg(
+    jsonb_build_object(
+        'name', name,
+        'type_name', type_name,
+        'start_value', start_value,
+        'last_value', last_value,
+        'min_value', min_value,
+        'max_value', max_value
+    )
+    ORDER BY name
+)
+FROM sequence_info;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+
+-- check nextval calls used in table definition
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT string_agg(ddl_events, '; ') FROM master_get_table_ddl_events('dist_schema_seq_test_without_initial_data.built_in_seq_test') AS ddl_events;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+
+-- Should succeed on all nodes as we don't try inserting column default values
+-- for the columns that are using int / smallint based sequences.
+-- Doing so is okay from the coordinator but would cause an error on workers.
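+-- (Here only the bigserial and bigint identity columns use their defaults;
+-- the smallserial / serial and smallint / int identity columns get explicit
+-- values, for the same reason as above.)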
+SELECT result FROM run_command_on_all_nodes(
+    $$
+    WITH ins AS (
+        INSERT INTO dist_schema_seq_test_without_initial_data.built_in_seq_test VALUES (10, 1, 1, DEFAULT, 1, 1, DEFAULT) RETURNING *
+    )
+    SELECT to_jsonb(ins) FROM ins;
+    $$,
+    parallel => false
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+
+-- will fail on workers but should still succeed on the coordinator
+SELECT result FROM run_command_on_all_nodes(
+    $$
+    WITH ins AS (
+        INSERT INTO dist_schema_seq_test_without_initial_data.built_in_seq_test VALUES (11, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING *
+    )
+    SELECT to_jsonb(ins) FROM ins;
+    $$,
+    parallel => false
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+
+SELECT * FROM dist_schema_seq_test_without_initial_data.built_in_seq_test ORDER BY 1, 2, 3, 4, 5, 6, 7;
+
+-- intermediate cleanup
+\c - - - :master_port
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA tenant_1, tenant_2, tenant_3, tenant_4, tenant_5, tenant_6, tenant_7, tenant_8, tenant_9, alter_table_add_column, initially_local_schema_seq_test_with_initial_data, dist_schema_seq_test_with_initial_data, initially_local_schema_seq_test_without_initial_data, dist_schema_seq_test_without_initial_data CASCADE;
+DROP SCHEMA regular_schema, alter_table_add_column_other_schema, regular_schema_worker_1, regular_schema_worker_2, regular_schema_worker_3, regular_schema_worker_4, regular_schema_worker_5 CASCADE;
+DROP FUNCTION create_citus_local_with_data(text), get_sequence_info(regclass);
+DROP SEQUENCE dist_seq;
+DROP ROLE tenant_9_owner;
+ALTER EXTENSION citus DROP ACCESS METHOD fake_am;
+
+SELECT result FROM run_command_on_all_nodes($Q$
+    DROP FUNCTION fake_am_handler(internal) CASCADE;
+$Q$);
+
+DROP ROLE rls_test_user_1, rls_test_user_2;
+
+-- do the work that's required to be done from the coordinator for the rest of the tests
+
+ALTER ROLE test_non_super_user WITH LOGIN;
+GRANT CREATE ON DATABASE regression TO test_non_super_user;
+
+SET citus.next_shard_id TO 2094000;
+SET client_min_messages TO WARNING;
+
+-- Remove a node, create a reference table, and add the node back so we can
+-- test implicit reference table replication when creating a distributed-schema
+-- table.
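+-- (While the node is removed, the reference table only gets placements on the
+-- remaining nodes; the placement on the re-added node is expected to be
+-- created implicitly later.)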
+SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);
+
+CREATE SCHEMA regular_schema;
+
+CREATE TABLE regular_schema.reference_table (id bigint PRIMARY KEY);
+SELECT create_reference_table('regular_schema.reference_table');
+
+INSERT INTO regular_schema.reference_table SELECT i FROM generate_series(1, 10) AS i;
+
+GRANT USAGE ON SCHEMA regular_schema TO test_non_super_user;
+GRANT SELECT ON regular_schema.reference_table TO test_non_super_user;
+GRANT REFERENCES ON regular_schema.reference_table TO test_non_super_user;
+GRANT DELETE ON regular_schema.reference_table TO test_non_super_user;
+
+-- add the node back with the groupid used earlier in the test file
+SELECT 1 FROM citus_add_node('localhost', :worker_2_port, groupid => 47);
+
+ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2094050;
+
+\c - test_non_super_user - :worker_1_port
+
+SET citus.shard_replication_factor TO 1;
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+
+BEGIN;
+    CREATE SCHEMA tenant_10;
+    CREATE TABLE tenant_10.t1(id int REFERENCES regular_schema.reference_table(id));
+
+    INSERT INTO tenant_10.t1 SELECT i FROM generate_series(1, 5) AS i;
+    DELETE FROM tenant_10.t1 WHERE id > 3;
+    DELETE FROM regular_schema.reference_table WHERE id > 3;
+COMMIT;
+
+\c - postgres - :master_port
+
+-- do the work that's required to be done from the coordinator for the rest of the tests
+
+SET citus.next_shard_id TO 2094100;
+SET client_min_messages TO WARNING;
+
+DROP SCHEMA tenant_10 CASCADE;
+DROP TABLE regular_schema.reference_table;
+
+-- Remove a node, create a reference table, and add the node back so we can
+-- test implicit reference table replication when creating a distributed-schema
+-- table.
+SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);
+
+CREATE TABLE regular_schema.reference_table (id bigint PRIMARY KEY);
+SELECT create_reference_table('regular_schema.reference_table');
+
+INSERT INTO regular_schema.reference_table SELECT i FROM generate_series(1, 10) AS i;
+
+GRANT USAGE ON SCHEMA regular_schema TO test_non_super_user;
+GRANT SELECT ON regular_schema.reference_table TO test_non_super_user;
+GRANT REFERENCES ON regular_schema.reference_table TO test_non_super_user;
+GRANT DELETE ON regular_schema.reference_table TO test_non_super_user;
+
+-- add the node back with the groupid used earlier in the test file
+SELECT 1 FROM citus_add_node('localhost', :worker_2_port, groupid => 47);
+
+ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2094150;
+
+\c - test_non_super_user - :worker_1_port
+
+SET citus.shard_replication_factor TO 1;
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+
+-- This time, first create an empty schema to force placing the shard placements
+-- of the tables under tenant_12 on the other worker node, different from the
+-- one chosen for the shard placements under tenant_10.
+BEGIN;
+    CREATE SCHEMA tenant_11;
+    CREATE SCHEMA tenant_12;
+    CREATE TABLE tenant_12.t1(id int REFERENCES regular_schema.reference_table(id));
+
+    INSERT INTO tenant_12.t1 SELECT i FROM generate_series(1, 5) AS i;
+    DELETE FROM tenant_12.t1 WHERE id > 3;
+    DELETE FROM regular_schema.reference_table WHERE id > 3;
+COMMIT;
+
+-- Ensure that a non-super user can alter / drop distributed-schema tables and
+-- create / drop distributed schemas too.
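+-- (The table and schema names below intentionally contain characters that
+-- need quoting and escaping when the commands are propagated to other nodes.)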
+
+ALTER TABLE tenant_12.t1 RENAME TO "\!@#?t1_renamed";
+ALTER SCHEMA tenant_12 RENAME TO "\!@#?tenant_12_renamed";
+
+SET search_path TO "\!@#?tenant_12_renamed";
+DROP TABLE "\!@#?t1_renamed";
+CREATE TABLE "\!@#?t3"(id int);
+ALTER TABLE "\!@#?t3" RENAME TO "t3";
+RESET search_path;
+
+ALTER SCHEMA "\!@#?tenant_12_renamed" RENAME TO tenant_12;
+
+CREATE TABLE tenant_12.t2(a int);
+ALTER TABLE tenant_12.t2 RENAME TO t2_renamed;
+DROP SCHEMA tenant_11;
+CREATE SCHEMA tenant_13;
+
+SELECT result FROM run_command_on_all_nodes(
+$$
+SELECT jsonb_agg(
+    jsonb_build_object(
+        'nspname', nspname,
+        'relnames', relnames
+    )
+    ORDER BY nspname
+)
+FROM (
+    SELECT nspname,
+           array_agg(relname ORDER BY relname) AS relnames
+    FROM pg_namespace
+    LEFT JOIN pg_class ON (pg_namespace.oid = pg_class.relnamespace)
+    WHERE nspname IN ('tenant_11', 'tenant_12', 'tenant_13') AND
+          (pg_class.oid IS NULL OR NOT relation_is_a_known_shard(pg_class.oid))
+    GROUP BY pg_namespace.oid
+) q;
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+
+\c - postgres - :master_port
+
+GRANT ALL ON SCHEMA regular_schema TO test_non_super_user;
+
+\c - test_non_super_user - :master_port
+
+SET citus.next_shard_id TO 2094200;
+SET client_min_messages TO WARNING;
+
+CREATE TABLE regular_schema.fkey_test_reference_table(a int PRIMARY KEY);
+SELECT create_reference_table('regular_schema.fkey_test_reference_table');
+
+-- gets automatically converted to a citus local table
+CREATE TABLE regular_schema.fkey_test_citus_local_1(a int PRIMARY KEY REFERENCES regular_schema.fkey_test_reference_table(a));
+
+ALTER TABLE regular_schema.fkey_test_reference_table ADD CONSTRAINT fkey_to_drop FOREIGN KEY (a) REFERENCES regular_schema.fkey_test_citus_local_1(a);
+
+\c - postgres - :master_port
+
+ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2094250;
+
+\c - test_non_super_user - :worker_1_port
+
+SET citus.shard_replication_factor TO 1;
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+
+CREATE SCHEMA fkey_test_tenant;
+CREATE TABLE fkey_test_tenant.t1(id int PRIMARY KEY REFERENCES regular_schema.fkey_test_reference_table(a));
+CREATE TABLE fkey_test_tenant.t2(id int PRIMARY KEY REFERENCES fkey_test_tenant.t1(id),
+                                 other int REFERENCES regular_schema.fkey_test_reference_table(a));
+
+-- errors due to the foreign key "from" the reference table
+ALTER TABLE regular_schema.fkey_test_citus_local_1 SET SCHEMA fkey_test_tenant;
+
+\c - test_non_super_user - :master_port
+
+SET citus.next_shard_id TO 2094300;
+SET client_min_messages TO WARNING;
+
+ALTER TABLE regular_schema.fkey_test_reference_table DROP CONSTRAINT fkey_to_drop;
+
+\c - postgres - :master_port
+
+ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH 2094350;
+
+\c - test_non_super_user - :worker_1_port
+
+SET citus.shard_replication_factor TO 1;
+SET citus.enable_schema_based_sharding TO ON;
+SET client_min_messages TO NOTICE;
+
+-- succeeds
+ALTER TABLE regular_schema.fkey_test_citus_local_1 SET SCHEMA fkey_test_tenant;
+
+CREATE TABLE regular_schema.fkey_test_local_1(a int PRIMARY KEY REFERENCES fkey_test_tenant.t1(id));
+
+ALTER TABLE regular_schema.fkey_test_local_1 SET SCHEMA fkey_test_tenant;
+
+SELECT result FROM run_command_on_all_nodes(
+$$
+WITH tables AS (
+    SELECT unnest(
+        ARRAY[
+            'regular_schema.fkey_test_reference_table'::regclass,
+            'fkey_test_tenant.fkey_test_citus_local_1'::regclass,
+            'fkey_test_tenant.fkey_test_local_1'::regclass,
+            'fkey_test_tenant.t1'::regclass,
+            'fkey_test_tenant.t2'::regclass
+        ]
+    ) AS oid
+)
+SELECT array_agg(conname ORDER BY conname) FROM pg_constraint
+WHERE contype = 'f' AND
+      (
+        conrelid IN (SELECT oid FROM tables) OR
+        confrelid IN (SELECT oid FROM tables)
+      )
+$$
+) JOIN pg_dist_node USING (nodeid) ORDER BY nodeport;
+
+-- check referencing fkey constraints on placements
+SELECT result FROM run_command_on_placements('regular_schema.fkey_test_reference_table', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$);
+SELECT result FROM run_command_on_placements('fkey_test_tenant.fkey_test_citus_local_1', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$);
+SELECT result FROM run_command_on_placements('fkey_test_tenant.fkey_test_local_1', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$);
+SELECT result FROM run_command_on_placements('fkey_test_tenant.t1', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$);
+SELECT result FROM run_command_on_placements('fkey_test_tenant.t2', $$ SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE contype = 'f' AND conrelid = '%s'::regclass $$);
+
+-- cleanup
+\c - postgres - :master_port
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA regular_schema, tenant_12, tenant_13, fkey_test_tenant CASCADE;
+
+REVOKE ALL ON DATABASE regression FROM test_non_super_user;
+DROP USER test_non_super_user;
+
+-- reset pg_dist_shardid_seq on the coordinator
+DO $proc$
+DECLARE
+    v_last_value bigint;
+BEGIN
+    SELECT last_value INTO v_last_value FROM pg_dist_shardid_seq_prev_state;
+    EXECUTE format('ALTER SEQUENCE pg_dist_shardid_seq RESTART WITH %s', v_last_value);
+END$proc$;
+
+DROP TABLE pg_dist_shardid_seq_prev_state;