/* Definition of the pqxx::stream_to class.
 *
 * pqxx::stream_to enables optimized batch updates to a database table.
 *
 * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stream_to instead.
 *
 * Copyright (c) 2000-2022, Jeroen T. Vermeulen.
 *
 * See COPYING for copyright license. If you did not receive a file called
 * COPYING with this source code, please notify the distributor of this
 * mistake, or contact the author.
 */
#ifndef PQXX_H_STREAM_TO
#define PQXX_H_STREAM_TO

#if !defined(PQXX_HEADER_PRE)
#  error "Include libpqxx headers as <pqxx/header>, not <pqxx/header.hxx>."
#endif

#include "pqxx/separated_list.hxx"
#include "pqxx/transaction_base.hxx"


namespace pqxx
{
/// Efficiently write data directly to a database table.
/** If you wish to insert rows of data into a table, you can compose INSERT
 * statements and execute them. But it's slow and tedious, and you need to
 * worry about quoting and escaping the data.
 *
 * If you're just inserting a single row, it probably won't matter much. You
 * can use prepared or parameterised statements to take care of the escaping
 * for you. But if you're inserting large numbers of rows you will want
 * something better.
 *
 * Inserting rows one by one using INSERT statements involves a lot of
 * pointless overhead, especially when you are working with a remote database
 * server over the network. You may end up sending each row over the network
 * as a separate query, and waiting for a reply. Do it "in bulk" using
 * `stream_to`, and you may find that it goes many times faster. Sometimes
 * you gain orders of magnitude in speed.
 *
 * Here's how it works: you create a `stream_to` stream to start writing to
 * your table. You will probably want to specify the columns. Then, you
 * feed your data into the stream one row at a time. And finally, you call the
 * stream's @ref complete function to tell it to finalise the operation, wait
 * for completion, and check for errors.
 *
 * (You _must_ complete the stream before committing or aborting the
 * transaction. The connection is in a special state while the stream is
 * active, where it can't process commands, and can't commit or abort a
 * transaction.)
 *
 * So how do you feed a row of data into the stream? There are several ways,
 * but the preferred one is to call its @ref write_values. Pass the field
 * values as arguments. It doesn't matter what type they are, as long as
 * libpqxx knows how to convert them to PostgreSQL's text format: `int`,
 * `std::string` or `std::string_view`, `float` and `double`, `bool`... lots
 * of basic types are supported. If some of the values are null, feel free
 * to use `std::optional`, `std::shared_ptr`, or `std::unique_ptr`.
 *
 * The arguments' types don't even have to match the fields' SQL types. If you
 * want to insert an `int` into a `DECIMAL` column, that's your choice -- it
 * will produce a `DECIMAL` value which happens to be integral. Insert a
 * `float` into a `VARCHAR` column? That's fine, you'll get a string whose
 * contents happen to read like a number. And so on. You can even insert
 * different types of value in the same column on different rows. If you have
 * a code path where a particular field is always null, just insert `nullptr`.
 *
 * There is another way to insert rows: the `<<` ("shift-left") operator.
 * It's not as fast and it doesn't support variable arguments: each row must be
 * either a `std::tuple` or something iterable, such as a `std::vector`, or
 * anything else with a `begin()` and `end()`.
 *
 * @warning While a stream is active, you cannot execute queries, open a
 * pipeline, etc. on the same transaction. A transaction can have at most one
 * object of a type derived from @ref pqxx::transaction_focus active on it at a
 * time.
 */
class PQXX_LIBEXPORT stream_to : transaction_focus
{
public:
  /// Stream data to a pre-quoted table and columns.
  /** This factory can be useful when it's not convenient to provide the
   * columns list in the form of a `std::initializer_list`, or when the list
   * of columns is simply not known at compile time.
   *
   * Also use this if you need to create multiple streams using the same table
   * path and/or columns list, and you want to save a bit of work on composing
   * the internal SQL statement for starting the stream. It lets you compose
   * the string representations for the table path and the columns list, so you
   * can compute these once and then re-use them later.
   *
   * @param tx The transaction within which the stream will operate.
   * @param path Name or path for the table upon which the stream will
   *     operate. If any part of the table path may contain special
   *     characters or be case-sensitive, quote the path using
   *     pqxx::connection::quote_table().
   * @param columns Columns to which the stream will write. They should be
   *     comma-separated and, if needed, quoted. You can produce the string
   *     using pqxx::connection::quote_columns(). If you omit this argument,
   *     the stream will write all columns in the table, in schema order.
   */
  static stream_to raw_table(
    transaction_base &tx, std::string_view path, std::string_view columns = "")
  {
    return {tx, path, columns};
  }

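  // Usage sketch for raw_table() (not part of the original header; the table
  // and column names here are hypothetical). Because the factory takes
  // pre-quoted strings, you can compose them once and reuse them across
  // several streams:
  //
  //   pqxx::connection cx;
  //   std::vector<std::string_view> cols{"id", "name"};
  //   auto const path{cx.quote_table({"public", "events"})};
  //   auto const columns{cx.quote_columns(cols)};
  //   pqxx::work tx{cx};
  //   auto stream{pqxx::stream_to::raw_table(tx, path, columns)};
  //   stream.write_values(1, "alpha");
  //   stream.complete();
  //   tx.commit();
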
  /// Create a `stream_to` writing to a named table and columns.
  /** Use this to stream data to a table, where the list of columns is known at
   * compile time.
   *
   * @param tx The transaction within which the stream will operate.
   * @param path A @ref table_path designating the target table.
   * @param columns Optionally, the columns to which the stream should write.
   *     If you do not pass this, the stream will write to all columns in the
   *     table, in schema order.
   */
  static stream_to table(
    transaction_base &tx, table_path path,
    std::initializer_list<std::string_view> columns = {})
  {
    auto const &conn{tx.conn()};
    return raw_table(tx, conn.quote_table(path), conn.quote_columns(columns));
  }

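  // A minimal end-to-end sketch of the flow described in the class
  // documentation above (assumed table "events" with an integer and a text
  // column; not from the original header):
  //
  //   pqxx::connection cx;
  //   pqxx::work tx{cx};
  //   auto stream{pqxx::stream_to::table(tx, {"events"}, {"id", "name"})};
  //   stream.write_values(1, "alpha");
  //   stream.write_values(2, std::optional<std::string>{});  // null field
  //   stream.complete();
  //   tx.commit();
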
#if defined(PQXX_HAVE_CONCEPTS)
  /// Create a `stream_to` writing to a named table and columns.
  /** Use this version to stream data to a table, when the list of columns is
   * not known at compile time.
   *
   * @param tx The transaction within which the stream will operate.
   * @param path A @ref table_path designating the target table.
   * @param columns The columns to which the stream should write.
   */
  template<PQXX_CHAR_STRINGS_ARG COLUMNS>
  static stream_to
  table(transaction_base &tx, table_path path, COLUMNS const &columns)
  {
    auto const &conn{tx.conn()};
    return stream_to::raw_table(
      tx, conn.quote_table(path), tx.conn().quote_columns(columns));
  }

  /// Create a `stream_to` writing to a named table and columns.
  /** Use this version to stream data to a table, when the list of columns is
   * not known at compile time.
   *
   * @param tx The transaction within which the stream will operate.
   * @param path A @ref table_path designating the target table.
   * @param columns The columns to which the stream should write.
   */
  template<PQXX_CHAR_STRINGS_ARG COLUMNS>
  static stream_to
  table(transaction_base &tx, std::string_view path, COLUMNS const &columns)
  {
    return stream_to::raw_table(tx, path, tx.conn().quote_columns(columns));
  }
#endif // PQXX_HAVE_CONCEPTS

  /// Create a stream, without specifying columns.
  /** @deprecated Use @ref table or @ref raw_table as a factory.
   *
   * Fields will be inserted in whatever order the columns have in the
   * database.
   *
   * You'll probably want to specify the columns, so that the mapping between
   * your data fields and the table is explicit in your code, and not hidden
   * in an "implicit contract" between your code and your schema.
   */
  [[deprecated("Use table() or raw_table() factory.")]] stream_to(
    transaction_base &tx, std::string_view table_name) :
          stream_to{tx, table_name, ""sv}
  {}

  /// Create a stream, specifying column names as a container of strings.
  /** @deprecated Use @ref table or @ref raw_table as a factory.
   */
  template<typename Columns>
  [[deprecated("Use table() or raw_table() factory.")]] stream_to(
    transaction_base &, std::string_view table_name, Columns const &columns);

  /// Create a stream, specifying column names as a sequence of strings.
  /** @deprecated Use @ref table or @ref raw_table as a factory.
   */
  template<typename Iter>
  [[deprecated("Use table() or raw_table() factory.")]] stream_to(
    transaction_base &, std::string_view table_name, Iter columns_begin,
    Iter columns_end);

  ~stream_to() noexcept;

  /// Does this stream still need to @ref complete()?
  [[nodiscard]] constexpr operator bool() const noexcept
  {
    return not m_finished;
  }
  /// Has this stream been through its concluding @c complete()?
  [[nodiscard]] constexpr bool operator!() const noexcept
  {
    return m_finished;
  }

  /// Complete the operation, and check for errors.
  /** Always call this to close the stream in an orderly fashion, even after
   * an error. (In the case of an error, abort the transaction afterwards.)
   *
   * The only circumstance where it's safe to skip this is after an error, if
   * you're discarding the entire connection.
   */
  void complete();

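  // Completion-ordering sketch (hypothetical names; not from the original
  // header). Complete the stream before touching the transaction again, and
  // abort rather than commit if anything went wrong mid-stream:
  //
  //   auto stream{pqxx::stream_to::table(tx, {"events"})};
  //   try
  //   {
  //     stream.write_values(1, "alpha");
  //     stream.complete();  // Must happen before commit or abort.
  //     tx.commit();
  //   }
  //   catch (std::exception const &)
  //   {
  //     if (stream) stream.complete();  // Close the stream even on error...
  //     tx.abort();                     // ...then abort the transaction.
  //     throw;
  //   }
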
  /// Insert a row of data.
  /** Returns a reference to the stream, so you can chain the calls.
   *
   * The @c row can be a tuple, or any type that can be iterated. Each
   * item becomes a field in the row, in the same order as the columns you
   * specified when creating the stream.
   *
   * If you don't already happen to have your fields in the form of a tuple or
   * container, prefer @c write_values. It's faster and more convenient.
   */
  template<typename Row> stream_to &operator<<(Row const &row)
  {
    write_row(row);
    return *this;
  }

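  // Chaining sketch for the shift-left operator (hypothetical data; not from
  // the original header). Rows may be tuples or iterable containers:
  //
  //   stream << std::tuple{1, "alpha"}
  //          << std::vector<std::string>{"2", "beta"};
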
  /// Stream a `stream_from` straight into a `stream_to`.
  /** This can be useful when copying between different databases. If the
   * source and the destination are on the same database, you'll get better
   * performance doing it all in a regular query.
   */
  stream_to &operator<<(stream_from &);

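  // Cross-database copy sketch (assumed connection strings and table names;
  // not from the original header):
  //
  //   pqxx::connection src_cx{"dbname=src"}, dst_cx{"dbname=dst"};
  //   pqxx::work src_tx{src_cx}, dst_tx{dst_cx};
  //   auto from{pqxx::stream_from::table(src_tx, {"events"})};
  //   auto to{pqxx::stream_to::table(dst_tx, {"events"})};
  //   to << from;
  //   from.complete();
  //   to.complete();
  //   dst_tx.commit();
  //   src_tx.commit();
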
  /// Insert a row of data, given in the form of a @c std::tuple or container.
  /** The @c row can be a tuple, or any type that can be iterated. Each
   * item becomes a field in the row, in the same order as the columns you
   * specified when creating the stream.
   *
   * The preferred way to insert a row is @c write_values.
   */
  template<typename Row> void write_row(Row const &row)
  {
    fill_buffer(row);
    write_buffer();
  }

  /// Insert values as a row.
  /** This is the recommended way of inserting data. Pass your field values,
   * of any convertible type.
   */
  template<typename... Ts> void write_values(Ts const &...fields)
  {
    fill_buffer(fields...);
    write_buffer();
  }

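  // write_values sketch with mixed types (hypothetical values; not from the
  // original header). As the class documentation notes, a field that is
  // always null can simply be passed as nullptr:
  //
  //   stream.write_values(42, "label", 3.14, nullptr);  // nullptr -> SQL null
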
private:
  /// Stream a pre-quoted table name and columns list.
  stream_to(
    transaction_base &tx, std::string_view path, std::string_view columns);

  bool m_finished = false;

  /// Reusable buffer for a row. Saves doing an allocation for each row.
  std::string m_buffer;

  /// Reusable buffer for converting/escaping a field.
  std::string m_field_buf;

  /// Glyph scanner, for parsing the client encoding.
  internal::glyph_scanner_func *m_scanner;

  /// Write a row of raw text-format data into the destination table.
  void write_raw_line(std::string_view);

  /// Write a row of data from @c m_buffer into the destination table.
  /** Resets the buffer for the next row.
   */
  void write_buffer();

  /// COPY encoding for a null field, plus subsequent separator.
  static constexpr std::string_view null_field{"\\N\t"};

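  // Illustration (an assumption for clarity, not from the original header):
  // in PostgreSQL's text COPY format, a row such as write_values(1, "a",
  // nullptr) gets buffered as "1\ta\t\\N\t" -- fields separated by tabs, null
  // encoded as \N, and the one-tab-too-many at the end dealt with when the
  // row is written out (see append_to_buffer below).
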
  /// Estimate buffer space needed for a field which is always null.
  template<typename T>
  static std::enable_if_t<nullness<T>::always_null, std::size_t>
  estimate_buffer(T const &)
  {
    return std::size(null_field);
  }

  /// Estimate buffer space needed for field f.
  /** The estimate is not very precise. We don't actually know how much space
   * we'll need once the escaping comes in.
   */
  template<typename T>
  static std::enable_if_t<not nullness<T>::always_null, std::size_t>
  estimate_buffer(T const &field)
  {
    return is_null(field) ? std::size(null_field) : size_buffer(field);
  }

  /// Append escaped version of @c data to @c m_buffer, plus a tab.
  void escape_field_to_buffer(std::string_view data);

  /// Append string representation for @c f to @c m_buffer.
  /** This is for the general case, where the field may contain a value.
   *
   * Also appends a tab. The tab is meant to be a separator, not a terminator,
   * so if you write any fields at all, you'll end up with one tab too many
   * at the end of the buffer.
   */
  template<typename Field>
  std::enable_if_t<not nullness<Field>::always_null>
  append_to_buffer(Field const &f)
  {
    // We append each field, terminated by a tab. That will leave us with
    // one tab too many, assuming we write any fields at all; we remove that
    // at the end.
    if (is_null(f))
    {
      // Easy. Append null and tab in one go.
      m_buffer.append(null_field);
    }
    else
    {
      // Convert f into m_buffer.

      using traits = string_traits<Field>;
      auto const budget{estimate_buffer(f)};
      auto const offset{std::size(m_buffer)};

      if constexpr (std::is_arithmetic_v<Field>)
      {
        // Specially optimised for "safe" types, which never need any
        // escaping. Convert straight into m_buffer.

        // The budget we get from size_buffer() includes room for the trailing
        // zero, which we must remove. But we're also inserting tabs between
        // fields, so we re-purpose the extra byte for that.
        auto const total{offset + budget};
        m_buffer.resize(total);
        auto const data{m_buffer.data()};
        char *const end{traits::into_buf(data + offset, data + total, f)};
        *(end - 1) = '\t';
        // Shrink to fit. Keep the tab though.
        m_buffer.resize(static_cast<std::size_t>(end - data));
      }
      else if constexpr (
        std::is_same_v<Field, std::string> or
        std::is_same_v<Field, std::string_view> or
        std::is_same_v<Field, zview>)
      {
        // This string may need escaping.
        m_field_buf.resize(budget);
        escape_field_to_buffer(f);
      }
      else
      {
        // This field needs to be converted to a string, and after that,
        // escaped as well.
        m_field_buf.resize(budget);
        auto const data{m_field_buf.data()};
        escape_field_to_buffer(
          traits::to_buf(data, data + std::size(m_field_buf), f));
      }
    }
  }

  /// Append string representation for a null field to @c m_buffer.
  /** This special case is for types which are always null.
   *
   * Also appends a tab. The tab is meant to be a separator, not a terminator,
   * so if you write any fields at all, you'll end up with one tab too many
   * at the end of the buffer.
   */
  template<typename Field>
  std::enable_if_t<nullness<Field>::always_null>
  append_to_buffer(Field const &)
  {
    m_buffer.append(null_field);
  }

  /// Write raw COPY line into @c m_buffer, based on a container of fields.
  template<typename Container>
  std::enable_if_t<not std::is_same_v<typename Container::value_type, char>>
  fill_buffer(Container const &c)
  {
    // To avoid unnecessary allocations and deallocations, we run through c
    // twice: once to determine how much buffer space we may need, and once to
    // actually write it into the buffer.
    std::size_t budget{0};
    for (auto const &f : c) budget += estimate_buffer(f);
    m_buffer.reserve(budget);
    for (auto const &f : c) append_to_buffer(f);
  }

  /// Estimate how many buffer bytes we need to write a tuple.
  template<typename Tuple, std::size_t... indexes>
  static std::size_t
  budget_tuple(Tuple const &t, std::index_sequence<indexes...>)
  {
    return (estimate_buffer(std::get<indexes>(t)) + ...);
  }

  /// Write tuple of fields to @c m_buffer.
  template<typename Tuple, std::size_t... indexes>
  void append_tuple(Tuple const &t, std::index_sequence<indexes...>)
  {
    (append_to_buffer(std::get<indexes>(t)), ...);
  }

  /// Write raw COPY line into @c m_buffer, based on a tuple of fields.
  template<typename... Elts> void fill_buffer(std::tuple<Elts...> const &t)
  {
    using indexes = std::make_index_sequence<sizeof...(Elts)>;

    m_buffer.reserve(budget_tuple(t, indexes{}));
    append_tuple(t, indexes{});
  }

  /// Write raw COPY line into @c m_buffer, based on varargs fields.
  template<typename... Ts> void fill_buffer(const Ts &...fields)
  {
    (..., append_to_buffer(fields));
  }

  constexpr static std::string_view s_classname{"stream_to"};
};


template<typename Columns>
inline stream_to::stream_to(
  transaction_base &tx, std::string_view table_name, Columns const &columns) :
        stream_to{tx, table_name, std::begin(columns), std::end(columns)}
{}


template<typename Iter>
inline stream_to::stream_to(
  transaction_base &tx, std::string_view table_name, Iter columns_begin,
  Iter columns_end) :
        stream_to{
          tx,
          table_name,
          separated_list(",", columns_begin, columns_end, [&tx](auto col) {
            return tx.quote_name(*col);
          })}
{}
} // namespace pqxx
#endif