mirror of https://github.com/mongodb/mongo
SERVER-105522: Upgrade librdkafka to v2.11.0 (#38821)
GitOrigin-RevId: b83cabc5300cb3f7778f555dbc5556caf5827670
Parent: cf499f7f2c
Commit: 948c54be5f
@@ -47,7 +47,7 @@ a notice will be included in
 | [jbeder/yaml-cpp] | MIT | 0.6.3 | | ✗ |
 | [JSON-Schema-Test-Suite] | Unknown License | Unknown | | |
 | [libmongocrypt] | Apache-2.0 | 1.14.0 | ✗ | ✗ |
-| [librdkafka - the Apache Kafka C/C++ client library] | BSD-3-Clause, Xmlproc License, ISC, MIT, Public Domain, Zlib, BSD-2-Clause, Andreas Stolcke License | 2.0.2 | | ✗ |
+| [librdkafka - the Apache Kafka C/C++ client library] | BSD-3-Clause, Xmlproc License, ISC, MIT, Public Domain, Zlib, BSD-2-Clause, Andreas Stolcke License | 2.11.1 | | ✗ |
 | [LibTomCrypt] | WTFPL, Public Domain | 1.18.2 | ✗ | ✗ |
 | [libunwind/libunwind] | MIT | v1.8.1 | | ✗ |
 | [linenoise] | BSD-2-Clause | Unknown | | ✗ |
@@ -1023,7 +1023,7 @@
         "name": "Organization: github"
       },
       "name": "librdkafka - the Apache Kafka C/C++ client library",
-      "version": "2.0.2",
+      "version": "2.11.0",
       "licenses": [
         {
           "license": {
@@ -1066,7 +1066,7 @@
           }
         }
       ],
-      "purl": "pkg:github/edenhill/librdkafka@v2.0.2",
+      "purl": "pkg:github/edenhill/librdkafka@v2.11.0",
      "properties": [
        {
          "name": "internal:team_responsible",
@@ -40,8 +40,15 @@ mongo_cc_library(
         "dist/src/lz4.c",
         "dist/src/lz4frame.c",
         "dist/src/lz4hc.c",
+        "dist/src/nanopb/pb_common.c",
+        "dist/src/nanopb/pb_decode.c",
+        "dist/src/nanopb/pb_encode.c",
+        "dist/src/opentelemetry/common.pb.c",
+        "dist/src/opentelemetry/metrics.pb.c",
+        "dist/src/opentelemetry/resource.pb.c",
         "dist/src/rdaddr.c",
         "dist/src/rdavl.c",
+        "dist/src/rdbase64.c",
         "dist/src/rdbuf.c",
         "dist/src/rdcrc32.c",
         "dist/src/rddl.c",
@@ -95,6 +102,9 @@ mongo_cc_library(
         "dist/src/rdkafka_ssl.c",
         "dist/src/rdkafka_sticky_assignor.c",
         "dist/src/rdkafka_subscription.c",
+        "dist/src/rdkafka_telemetry.c",
+        "dist/src/rdkafka_telemetry_decode.c",
+        "dist/src/rdkafka_telemetry_encode.c",
         "dist/src/rdkafka_timer.c",
         "dist/src/rdkafka_topic.c",
         "dist/src/rdkafka_transport.c",
@@ -127,6 +137,7 @@ mongo_cc_library(
     copts = [
         "-Wno-array-bounds",
         "-Wno-unused-variable",
         "-Wno-enum-conversion",
         "-Wno-implicit-fallthrough",
+        "-Wno-unused-but-set-variable",
        "-I$(GENDIR)/src/third_party/librdkafka/dist/FAKE",
@@ -1,6 +1,7 @@
 librdkafka - Apache Kafka C driver library
 
-Copyright (c) 2012-2020, Magnus Edenhill
+Copyright (c) 2012-2022, Magnus Edenhill
+              2023, Confluent Inc.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
-src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
+src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git 5ff839680134437dbf4678f3d0c7b371d84f4964
 
 LZ4 Library
-Copyright (c) 2011-2016, Yann Collet
+Copyright (c) 2011-2020, Yann Collet
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification,
@@ -0,0 +1,22 @@
For files in src/nanopb : https://github.com/nanopb/nanopb/blob/8ef41e0ebd45daaf19459a011f67e66224b247cd/LICENSE.txt

Copyright (c) 2011 Petteri Aimonen <jpa at nanopb.mail.kapsi.fi>

This software is provided 'as-is', without any express or
implied warranty. In no event will the authors be held liable
for any damages arising from the use of this software.

Permission is granted to anyone to use this software for any
purpose, including commercial applications, and to alter it and
redistribute it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you
   must not claim that you wrote the original software. If you use
   this software in a product, an acknowledgment in the product
   documentation would be appreciated but is not required.

2. Altered source versions must be plainly marked as such, and
   must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any source
   distribution.
@@ -0,0 +1,203 @@
For files in src/opentelemetry: https://github.com/open-telemetry/opentelemetry-proto/blob/81a296f9dba23e32d77f46d58c8ea4244a2157a6/LICENSE

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -2,7 +2,8 @@ LICENSE
 --------------------------------------------------------------
 librdkafka - Apache Kafka C driver library
 
-Copyright (c) 2012-2020, Magnus Edenhill
+Copyright (c) 2012-2022, Magnus Edenhill
+              2023, Confluent Inc.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -140,10 +141,10 @@ THE SOFTWARE
 
 LICENSE.lz4
 --------------------------------------------------------------
-src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
+src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git 5ff839680134437dbf4678f3d0c7b371d84f4964
 
 LZ4 Library
-Copyright (c) 2011-2016, Yann Collet
+Copyright (c) 2011-2020, Yann Collet
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification,
@@ -197,6 +198,238 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
 
 LICENSE.nanopb
 --------------------------------------------------------------
 For files in src/nanopb : https://github.com/nanopb/nanopb/blob/8ef41e0ebd45daaf19459a011f67e66224b247cd/LICENSE.txt
 
 [The zlib-style nanopb license text follows, identical to the new
 LICENSE.nanopb file shown above.]
 
 
 LICENSE.opentelemetry
 --------------------------------------------------------------
 For files in src/opentelemetry: https://github.com/open-telemetry/opentelemetry-proto/blob/81a296f9dba23e32d77f46d58c8ea4244a2157a6/LICENSE
 
 [The Apache License 2.0 text follows, identical to the new
 LICENSE.opentelemetry file shown above.]
 
 
 LICENSE.pycrc
 --------------------------------------------------------------
 The following license applies to the files rdcrc32.c and rdcrc32.h which
@@ -26,8 +26,6 @@
 #define WITH_GCC 1
 // gxx
 #define WITH_GXX 1
-// pkgconfig
-#define WITH_PKGCONFIG 1
 // install
 #define WITH_INSTALL 1
 // gnuar
@@ -51,21 +49,21 @@
 // atomic_64
 #define ATOMIC_OP(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
 // parseversion
-#define RDKAFKA_VERSION_STR "2.0.2"
+#define RDKAFKA_VERSION_STR "2.11.0"
 // parseversion
-#define MKL_APP_VERSION "2.0.2"
+#define MKL_APP_VERSION "2.11.0"
 // c11threads
 #define WITH_C11THREADS 1
 // libdl
 #define WITH_LIBDL 1
 // WITH_PLUGINS
 #define WITH_PLUGINS 1
 // zlib
 #define WITH_ZLIB 1
 // libssl
 #define WITH_SSL 1
 // libcrypto
 #define OPENSSL_SUPPRESS_DEPRECATED "OPENSSL_SUPPRESS_DEPRECATED"
 // libsasl2
 #define WITH_SASL_CYRUS 1
 // libzstd
 #define WITH_ZSTD 1
 // libcurl
 #define WITH_CURL 1
 // WITH_HDRHISTOGRAM
@@ -99,5 +97,5 @@
 // getrusage
 #define HAVE_GETRUSAGE 1
 // BUILT_WITH
-#define BUILT_WITH "GCC GXX PKGCONFIG INSTALL GNULD LDS C11THREADS LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC"
+#define BUILT_WITH "GCC GXX INSTALL GNULD LDS C11THREADS LIBDL PLUGINS SSL SASL_CYRUS CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC"
 #endif /* _CONFIG_H_ */
@@ -51,9 +51,11 @@
 // atomic_64
 #define ATOMIC_OP(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
 // parseversion
-#define RDKAFKA_VERSION_STR "2.0.2"
+#define RDKAFKA_VERSION_STR "2.11.0"
 // parseversion
-#define MKL_APP_VERSION "2.0.2"
+#define MKL_APP_VERSION "2.11.0"
+// c11threads
+#define WITH_C11THREADS 1
 // libdl
 #define WITH_LIBDL 1
 // WITH_PLUGINS
@@ -62,6 +64,8 @@
 #define WITH_ZLIB 1
 // libssl
 #define WITH_SSL 1
+// libcrypto
+#define OPENSSL_SUPPRESS_DEPRECATED "OPENSSL_SUPPRESS_DEPRECATED"
 // libsasl2
 #define WITH_SASL_CYRUS 1
 // libzstd
@@ -101,5 +105,5 @@
 // getrusage
 #define HAVE_GETRUSAGE 1
 // BUILT_WITH
-#define BUILT_WITH "GCC GXX PKGCONFIG INSTALL GNULD LDS LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC CRC32C_HW"
+#define BUILT_WITH "GCC GXX PKGCONFIG INSTALL GNULD LDS C11THREADS LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC CRC32C_HW"
 #endif /* _CONFIG_H_ */
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,8 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -130,7 +131,6 @@ int RdKafka::socket_cb_trampoline(int domain,
   return handle->socket_cb_->socket_cb(domain, type, protocol);
 }
 
-
 int RdKafka::resolve_cb_trampoline(const char *node,
                                    const char *service,
                                    const struct addrinfo *hints,
@@ -152,7 +152,6 @@ int RdKafka::connect_cb_trampoline(int sockfd,
   return handle->connect_cb_->connect_cb(sockfd, addr, addrlen, id);
 }
 
-
 int RdKafka::open_cb_trampoline(const char *pathname,
                                 int flags,
                                 mode_t mode,
@@ -426,6 +425,14 @@ rd_kafka_topic_partition_list_t *partitions_to_c_parts(
     rd_kafka_topic_partition_t *rktpar = rd_kafka_topic_partition_list_add(
         c_parts, tpi->topic_.c_str(), tpi->partition_);
     rktpar->offset = tpi->offset_;
+    if (tpi->metadata_.size()) {
+      void *metadata_p = mem_malloc(tpi->metadata_.size());
+      memcpy(metadata_p, tpi->metadata_.data(), tpi->metadata_.size());
+      rktpar->metadata      = metadata_p;
+      rktpar->metadata_size = tpi->metadata_.size();
+    }
+    if (tpi->leader_epoch_ != -1)
+      rd_kafka_topic_partition_set_leader_epoch(rktpar, tpi->leader_epoch_);
   }
 
   return c_parts;
@@ -449,6 +456,11 @@ void update_partitions_from_c_parts(
           p->partition == pp->partition_) {
         pp->offset_ = p->offset;
         pp->err_    = static_cast<RdKafka::ErrorCode>(p->err);
+        pp->leader_epoch_ = rd_kafka_topic_partition_get_leader_epoch(p);
+        if (p->metadata_size) {
+          unsigned char *metadata = (unsigned char *)p->metadata;
+          pp->metadata_.assign(metadata, metadata + p->metadata_size);
+        }
       }
     }
   }
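The metadata deep copy added to `partitions_to_c_parts()` and mirrored in `update_partitions_from_c_parts()` is what lets per-partition commit metadata survive the C++-to-C round trip. A minimal sketch of how an application might exercise it, assuming a running `KafkaConsumer`; the topic, partition, and note values are placeholders, not part of this commit:

```cpp
#include <string>
#include <vector>

#include <librdkafka/rdkafkacpp.h>

// Commit an offset with a small piece of application metadata attached;
// the deep copy in partitions_to_c_parts() carries it into the C API.
void commit_with_note(RdKafka::KafkaConsumer *consumer,
                      const std::string &topic,
                      int32_t partition,
                      int64_t next_offset,
                      const std::string &note) {
  std::vector<RdKafka::TopicPartition *> offsets;
  offsets.push_back(
      RdKafka::TopicPartition::create(topic, partition, next_offset));
  std::vector<unsigned char> md(note.begin(), note.end());
  offsets[0]->set_metadata(md);
  consumer->commitSync(offsets);  // metadata round-trips through the broker
  RdKafka::TopicPartition::destroy(offsets);
}
```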
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2015 Magnus Edenhill
+ * Copyright (c) 2015-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2015 Magnus Edenhill
+ * Copyright (c) 2015-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,8 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -120,7 +121,7 @@ namespace RdKafka {
  * @remark This value should only be used during compile time,
  *         for runtime checks of version use RdKafka::version()
  */
-#define RD_KAFKA_VERSION 0x020002ff
+#define RD_KAFKA_VERSION 0x020b00ff
 
 /**
  * @brief Returns the librdkafka version as integer.
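`RD_KAFKA_VERSION` packs the version as `0xMMmmRRPP`, so `0x020b00ff` decodes to v2.11.0 (final release). A small sketch contrasting the compile-time macro with the runtime check the `@remark` above recommends; it assumes the standard `librdkafka/rdkafkacpp.h` install path:

```cpp
#include <cstdio>

#include <librdkafka/rdkafkacpp.h>

int main() {
  // Compile-time constant from the header this program was built against:
  // 0xMMmmRRPP, so 0x020b00ff is v2.11.0, final release.
  std::printf("built against 0x%08x\n", RD_KAFKA_VERSION);

  // Runtime check of the shared library actually loaded, as the @remark
  // above recommends.
  std::printf("running with %s (0x%08x)\n", RdKafka::version_str().c_str(),
              RdKafka::version());
  return 0;
}
```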
@@ -333,6 +334,13 @@ enum ErrorCode {
   ERR__NOOP = -141,
   /** No offset to automatically reset to */
   ERR__AUTO_OFFSET_RESET = -140,
+  /** Partition log truncation detected */
+  ERR__LOG_TRUNCATION = -139,
+  /** A different record in the batch was invalid
+   *  and this message failed persisting. */
+  ERR__INVALID_DIFFERENT_RECORD = -138,
+  /** Broker is going away but client isn't terminating */
+  ERR__DESTROY_BROKER = -137,
 
   /** End internal error codes */
   ERR__END = -100,
@@ -550,7 +558,28 @@ enum ErrorCode {
   /** Unable to update finalized features due to server error */
   ERR_FEATURE_UPDATE_FAILED = 96,
   /** Request principal deserialization failed during forwarding */
-  ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97
+  ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
+  /** Unknown Topic Id */
+  ERR_UNKNOWN_TOPIC_ID = 100,
+  /** The member epoch is fenced by the group coordinator */
+  ERR_FENCED_MEMBER_EPOCH = 110,
+  /** The instance ID is still used by another member in the
+   *  consumer group */
+  ERR_UNRELEASED_INSTANCE_ID = 111,
+  /** The assignor or its version range is not supported by the consumer
+   *  group */
+  ERR_UNSUPPORTED_ASSIGNOR = 112,
+  /** The member epoch is stale */
+  ERR_STALE_MEMBER_EPOCH = 113,
+  /** Client sent a push telemetry request with an invalid or outdated
+   *  subscription ID. */
+  ERR_UNKNOWN_SUBSCRIPTION_ID = 117,
+  /** Client sent a push telemetry request larger than the maximum size
+   *  the broker will accept. */
+  ERR_TELEMETRY_TOO_LARGE = 118,
+  /** Client metadata is stale,
+   *  client should rebootstrap to obtain new metadata. */
+  ERR_REBOOTSTRAP_REQUIRED = 129
 };
 
 
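Several of these codes describe conditions a consumer loop should now handle explicitly, e.g. `ERR__LOG_TRUNCATION` after an unclean leader election. A hedged sketch of where they would surface; creation and subscription of the consumer are assumed to happen elsewhere:

```cpp
#include <iostream>

#include <librdkafka/rdkafkacpp.h>

// `consumer` is assumed to be a subscribed KafkaConsumer created elsewhere.
void poll_once(RdKafka::KafkaConsumer *consumer) {
  RdKafka::Message *msg = consumer->consume(1000 /* timeout ms */);
  switch (msg->err()) {
    case RdKafka::ERR_NO_ERROR:
      std::cout << "message: " << msg->len() << " bytes\n";
      break;
    case RdKafka::ERR__TIMED_OUT:
      break;  // nothing ready within the timeout
    case RdKafka::ERR__LOG_TRUNCATION:
      // New in this upgrade: the partition log was truncated behind the
      // stored offset (unclean leader election); seek or reset per policy.
      std::cerr << "log truncation: " << msg->errstr() << "\n";
      break;
    default:
      std::cerr << "consume error: " << RdKafka::err2str(msg->err()) << "\n";
      break;
  }
  delete msg;
}
```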
@@ -2061,6 +2090,18 @@ class RD_EXPORT TopicPartition {
 
   /** @returns error code (if applicable) */
   virtual ErrorCode err() const = 0;
+
+  /** @brief Get partition leader epoch, or -1 if not known or relevant. */
+  virtual int32_t get_leader_epoch() = 0;
+
+  /** @brief Set partition leader epoch. */
+  virtual void set_leader_epoch(int32_t leader_epoch) = 0;
+
+  /** @brief Get partition metadata. */
+  virtual std::vector<unsigned char> get_metadata() = 0;
+
+  /** @brief Set partition metadata. */
+  virtual void set_metadata(std::vector<unsigned char> &metadata) = 0;
 };
 
 
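The leader-epoch accessors pair with `KafkaConsumer::offsets_store()`. A sketch of storing an offset with its epoch attached, assuming `enable.auto.offset.store=false` and a freshly consumed `msg`:

```cpp
#include <vector>

#include <librdkafka/rdkafkacpp.h>

// Store the next offset together with its leader epoch; requires
// enable.auto.offset.store=false on the consumer configuration.
void store_with_epoch(RdKafka::KafkaConsumer *consumer,
                      RdKafka::Message *msg) {
  std::vector<RdKafka::TopicPartition *> parts;
  parts.push_back(RdKafka::TopicPartition::create(
      msg->topic_name(), msg->partition(), msg->offset() + 1));
  // The epoch fences stale partition leaders (see the offsets_store()
  // @remark further below).
  parts[0]->set_leader_epoch(msg->leader_epoch());
  if (consumer->offsets_store(parts) != RdKafka::ERR_NO_ERROR) {
    // e.g. none of the offsets could be stored, or partition not assigned
  }
  RdKafka::TopicPartition::destroy(parts);
}
```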
@@ -2118,6 +2159,11 @@ class RD_EXPORT Topic {
    * The offset will be committed (written) to the broker (or file) according
    * to \p auto.commit.interval.ms or next manual offset-less commit call.
    *
+   * @deprecated This API lacks support for partition leader epochs, which makes
+   *             it at risk for unclean leader election log truncation issues.
+   *             Use KafkaConsumer::offsets_store() or
+   *             Message::offset_store() instead.
+   *
    * @remark \c enable.auto.offset.store must be set to \c false when using
    *         this API.
    *
@@ -2548,6 +2594,31 @@ class RD_EXPORT Message {
   /** @returns the broker id of the broker the message was produced to or
    *           fetched from, or -1 if not known/applicable. */
   virtual int32_t broker_id() const = 0;
+
+  /** @returns the message's partition leader epoch at the time the message was
+   *           fetched and if known, else -1. */
+  virtual int32_t leader_epoch() const = 0;
+
+  /**
+   * @brief Store offset +1 for the consumed message.
+   *
+   * The message offset + 1 will be committed to broker according
+   * to \c `auto.commit.interval.ms` or manual offset-less commit()
+   *
+   * @warning This method may only be called for partitions that are currently
+   *          assigned.
+   *          Non-assigned partitions will fail with ERR__STATE.
+   *
+   * @warning Avoid storing offsets after calling seek() (et.al) as
+   *          this may later interfere with resuming a paused partition,
+   *          instead store offsets prior to calling seek.
+   *
+   * @remark \c `enable.auto.offset.store` must be set to "false" when using
+   *         this API.
+   *
+   * @returns NULL on success or an error object on failure.
+   */
+  virtual Error *offset_store() = 0;
 };
 
 /**@}*/
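`Message::offset_store()` is the per-message replacement for the deprecated `Topic::offset_store()` above. A sketch of the required configuration and call pattern; the broker list and group id are placeholders:

```cpp
#include <string>

#include <librdkafka/rdkafkacpp.h>

// Build a consumer configured for manual offset storage. The broker list
// and group id below are placeholders for illustration only.
RdKafka::KafkaConsumer *make_consumer() {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("bootstrap.servers", "localhost:9092", errstr);
  conf->set("group.id", "example-group", errstr);
  conf->set("enable.auto.offset.store", "false", errstr);  // required
  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  delete conf;
  return c;
}

// After a message has been fully processed, store offset + 1 for it.
void mark_processed(RdKafka::Message *msg) {
  RdKafka::Error *err = msg->offset_store();
  if (err) {
    // e.g. ERR__STATE if the partition is no longer assigned.
    delete err;  // the caller owns the returned Error object
  }
}
```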
@@ -2948,6 +3019,9 @@ class RD_EXPORT KafkaConsumer : public virtual Handle {
    * @remark \c enable.auto.offset.store must be set to \c false when using
    *         this API.
    *
+   * @remark The leader epoch, if set, will be used to fence outdated partition
+   *         leaders. See TopicPartition::set_leader_epoch().
+   *
    * @returns RdKafka::ERR_NO_ERROR on success, or
    *          RdKafka::ERR___UNKNOWN_PARTITION if none of the offsets could
    *          be stored, or
@@ -1,7 +1,8 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -550,6 +551,21 @@ class MessageImpl : public Message {
     return rd_kafka_message_broker_id(rkmessage_);
   }
 
+  int32_t leader_epoch() const {
+    return rd_kafka_message_leader_epoch(rkmessage_);
+  }
+
+
+  Error *offset_store() {
+    rd_kafka_error_t *c_error;
+
+    c_error = rd_kafka_offset_store_message(rkmessage_);
+
+    if (c_error)
+      return new ErrorImpl(c_error);
+    else
+      return NULL;
+  }
 
   RdKafka::Topic *topic_;
   rd_kafka_message_t *rkmessage_;
@@ -759,7 +775,6 @@ class ConfImpl : public Conf {
     return Conf::CONF_OK;
   }
 
-
   Conf::ConfResult set(const std::string &name,
                        OpenCb *open_cb,
                        std::string &errstr) {
@@ -926,6 +941,20 @@ class ConfImpl : public Conf {
     return Conf::CONF_OK;
   }
 
+  Conf::ConfResult get(ResolveCb *&resolve_cb) const {
+    if (!rk_conf_)
+      return Conf::CONF_INVALID;
+    resolve_cb = this->resolve_cb_;
+    return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(ConnectCb *&connect_cb) const {
+    if (!rk_conf_)
+      return Conf::CONF_INVALID;
+    connect_cb = this->connect_cb_;
+    return Conf::CONF_OK;
+  }
+
   Conf::ConfResult get(
       OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const {
     if (!rk_conf_)
@@ -962,20 +991,6 @@ class ConfImpl : public Conf {
     return Conf::CONF_OK;
   }
 
-  Conf::ConfResult get(ResolveCb *&resolve_cb) const {
-    if (!rk_conf_)
-      return Conf::CONF_INVALID;
-    resolve_cb = this->resolve_cb_;
-    return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(ConnectCb *&connect_cb) const {
-    if (!rk_conf_)
-      return Conf::CONF_INVALID;
-    connect_cb = this->connect_cb_;
-    return Conf::CONF_OK;
-  }
-
   Conf::ConfResult get(OpenCb *&open_cb) const {
     if (!rk_conf_)
       return Conf::CONF_INVALID;
@@ -1294,14 +1309,16 @@ class TopicPartitionImpl : public TopicPartition {
       topic_(topic),
       partition_(partition),
       offset_(RdKafka::Topic::OFFSET_INVALID),
-      err_(ERR_NO_ERROR) {
+      err_(ERR_NO_ERROR),
+      leader_epoch_(-1) {
   }
 
   TopicPartitionImpl(const std::string &topic, int partition, int64_t offset) :
       topic_(topic),
       partition_(partition),
       offset_(offset),
-      err_(ERR_NO_ERROR) {
+      err_(ERR_NO_ERROR),
+      leader_epoch_(-1) {
   }
 
   TopicPartitionImpl(const rd_kafka_topic_partition_t *c_part) {
@@ -1309,7 +1326,11 @@ class TopicPartitionImpl : public TopicPartition {
     partition_ = c_part->partition;
     offset_    = c_part->offset;
     err_       = static_cast<ErrorCode>(c_part->err);
-    // FIXME: metadata
+    leader_epoch_ = rd_kafka_topic_partition_get_leader_epoch(c_part);
+    if (c_part->metadata_size > 0) {
+      unsigned char *metadata = (unsigned char *)c_part->metadata;
+      metadata_.assign(metadata, metadata + c_part->metadata_size);
+    }
   }
 
   static void destroy(std::vector<TopicPartition *> &partitions);
@@ -1333,6 +1354,22 @@ class TopicPartitionImpl : public TopicPartition {
     offset_ = offset;
   }
 
+  int32_t get_leader_epoch() {
+    return leader_epoch_;
+  }
+
+  void set_leader_epoch(int32_t leader_epoch) {
+    leader_epoch_ = leader_epoch;
+  }
+
+  std::vector<unsigned char> get_metadata() {
+    return metadata_;
+  }
+
+  void set_metadata(std::vector<unsigned char> &metadata) {
+    metadata_ = metadata;
+  }
+
   std::ostream &operator<<(std::ostream &ostrm) const {
     return ostrm << topic_ << " [" << partition_ << "]";
   }
@@ -1341,6 +1378,8 @@ class TopicPartitionImpl : public TopicPartition {
   int partition_;
   int64_t offset_;
   ErrorCode err_;
+  int32_t leader_epoch_;
+  std::vector<unsigned char> metadata_;
 };
 
 
@@ -0,0 +1,30 @@
# Instructions for Updating KLZ4 Version

This document describes the steps to update the bundled lz4 version, that is,
the version used when `./configure` is run with `--disable-lz4-ext`.

1. For each file in the [lz4 repository's](https://github.com/lz4/lz4/) `lib`
   directory (checked out to the appropriate version tag), copy it into the
   librdkafka `src` directory, overwriting the previous files.
2. Copy the `xxhash.h` and `xxhash.c` files, renaming them to `rdxxhash.h` and
   `rdxxhash.c`, respectively, replacing the previous files. Change any
   `#include`s of `xxhash.h` to `rdxxhash.h`.
3. Replace the `#else` block of the
   `#if defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)`
   conditional with the following code, including the comment:
   ```c
   #else
   /* NOTE: While upgrading the lz4 version, replace the original `#else` block
    * in the code with this block, and retain this comment. */
   struct rdkafka_s;
   extern void *rd_kafka_mem_malloc(struct rdkafka_s *rk, size_t s);
   extern void *rd_kafka_mem_calloc(struct rdkafka_s *rk, size_t n, size_t s);
   extern void rd_kafka_mem_free(struct rdkafka_s *rk, void *p);
   # define ALLOC(s) rd_kafka_mem_malloc(NULL, s)
   # define ALLOC_AND_ZERO(s) rd_kafka_mem_calloc(NULL, 1, s)
   # define FREEMEM(p) rd_kafka_mem_free(NULL, p)
   #endif
   ```
4. Change the version mentioned for lz4 in `configure.self`.
5. Run `./configure` with the `--disable-lz4-ext` option, then make and run test 0017.
6. Update CHANGELOG.md, the lz4 LICENSE, and the combined LICENSE.
File diff suppressed because it is too large.
@ -1,398 +0,0 @@
|
|||
/*
|
||||
Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef cJSON__h
|
||||
#define cJSON__h
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if !defined(__WINDOWS__) && \
|
||||
(defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
|
||||
#define __WINDOWS__
|
||||
#endif
|
||||
|
||||
#ifdef __WINDOWS__
|
||||
|
||||
/* When compiling for windows, we specify a specific calling convention to avoid
|
||||
issues where we are being called from a project with a different default calling
|
||||
convention. For windows you have 3 define options:
|
||||
|
||||
CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever
|
||||
dllexport symbols CJSON_EXPORT_SYMBOLS - Define this on library build when you
|
||||
want to dllexport symbols (default) CJSON_IMPORT_SYMBOLS - Define this if you
|
||||
want to dllimport symbol
|
||||
|
||||
For *nix builds that support visibility attribute, you can define similar
|
||||
behavior by
|
||||
|
||||
setting default visibility to hidden by adding
|
||||
-fvisibility=hidden (for gcc)
|
||||
or
|
||||
-xldscope=hidden (for sun cc)
|
||||
to CFLAGS
|
||||
|
||||
then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way
|
||||
CJSON_EXPORT_SYMBOLS does
|
||||
|
||||
*/
|
||||
|
||||
#define CJSON_CDECL __cdecl
|
||||
#define CJSON_STDCALL __stdcall
|
||||
|
||||
/* export symbols by default, this is necessary for copy pasting the C and
|
||||
* header file */
|
||||
#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && \
|
||||
!defined(CJSON_EXPORT_SYMBOLS)
|
||||
#define CJSON_EXPORT_SYMBOLS
|
||||
#endif
|
||||
|
||||
#if defined(CJSON_HIDE_SYMBOLS)
|
||||
#define CJSON_PUBLIC(type) type CJSON_STDCALL
|
||||
#elif defined(CJSON_EXPORT_SYMBOLS)
|
||||
#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
|
||||
#elif defined(CJSON_IMPORT_SYMBOLS)
|
||||
#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
|
||||
#endif
|
||||
#else /* !__WINDOWS__ */
|
||||
#define CJSON_CDECL
|
||||
#define CJSON_STDCALL
|
||||
|
||||
#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined(__SUNPRO_C)) && \
|
||||
defined(CJSON_API_VISIBILITY)
|
||||
#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
|
||||
#else
|
||||
#define CJSON_PUBLIC(type) type
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* project version */
|
||||
#define CJSON_VERSION_MAJOR 1
|
||||
#define CJSON_VERSION_MINOR 7
|
||||
#define CJSON_VERSION_PATCH 14
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
/* cJSON Types: */
|
||||
#define cJSON_Invalid (0)
|
||||
#define cJSON_False (1 << 0)
|
||||
#define cJSON_True (1 << 1)
|
||||
#define cJSON_NULL (1 << 2)
|
||||
#define cJSON_Number (1 << 3)
|
||||
#define cJSON_String (1 << 4)
|
||||
#define cJSON_Array (1 << 5)
|
||||
#define cJSON_Object (1 << 6)
|
||||
#define cJSON_Raw (1 << 7) /* raw json */
|
||||
|
||||
#define cJSON_IsReference 256
|
||||
#define cJSON_StringIsConst 512
|
||||
|
||||
/* The cJSON structure: */
|
||||
typedef struct cJSON {
|
||||
/* next/prev allow you to walk array/object chains. Alternatively, use
|
||||
* GetArraySize/GetArrayItem/GetObjectItem */
|
||||
struct cJSON *next;
|
||||
struct cJSON *prev;
|
||||
/* An array or object item will have a child pointer pointing to a chain
|
||||
* of the items in the array/object. */
|
||||
struct cJSON *child;
|
||||
|
||||
/* The type of the item, as above. */
|
||||
int type;
|
||||
|
||||
/* The item's string, if type==cJSON_String and type == cJSON_Raw */
|
||||
char *valuestring;
|
||||
/* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead
|
||||
*/
|
||||
int valueint;
|
||||
/* The item's number, if type==cJSON_Number */
|
||||
double valuedouble;
|
||||
|
||||
/* The item's name string, if this item is the child of, or is in the
|
||||
* list of subitems of an object. */
|
||||
char *string;
|
||||
} cJSON;
|
||||
|
||||
typedef struct cJSON_Hooks {
|
||||
/* malloc/free are CDECL on Windows regardless of the default calling
|
||||
* convention of the compiler, so ensure the hooks allow passing those
|
||||
* functions directly. */
|
||||
void *(CJSON_CDECL *malloc_fn)(size_t sz);
|
||||
void(CJSON_CDECL *free_fn)(void *ptr);
|
||||
} cJSON_Hooks;
|
||||
|
||||
typedef int cJSON_bool;
|
||||
|
||||
/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse
 * them. This is to prevent stack overflows. */
#ifndef CJSON_NESTING_LIMIT
#define CJSON_NESTING_LIMIT 1000
#endif

/* returns the version of cJSON as a string */
CJSON_PUBLIC(const char *) cJSON_Version(void);

/* Supply malloc, realloc and free functions to cJSON */
CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks);
/* Memory Management: the caller is always responsible to free the results from
 * all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib
 * free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is
 * cJSON_PrintPreallocated, where the caller has full responsibility of the
 * buffer. */
/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */
CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value);
CJSON_PUBLIC(cJSON *)
cJSON_ParseWithLength(const char *value, size_t buffer_length);
/* ParseWithOpts allows you to require (and check) that the JSON is null
 * terminated, and to retrieve the pointer to the final byte parsed. */
/* If you supply a ptr in return_parse_end and parsing fails, then
 * return_parse_end will contain a pointer to the error so will match
 * cJSON_GetErrorPtr(). */
CJSON_PUBLIC(cJSON *)
cJSON_ParseWithOpts(const char *value,
                    const char **return_parse_end,
                    cJSON_bool require_null_terminated);
CJSON_PUBLIC(cJSON *)
cJSON_ParseWithLengthOpts(const char *value,
                          size_t buffer_length,
                          const char **return_parse_end,
                          cJSON_bool require_null_terminated);
/* Render a cJSON entity to text for transfer/storage. */
CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item);
/* Render a cJSON entity to text for transfer/storage without any formatting. */
CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item);
/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess
 * at the final size. guessing well reduces reallocation. fmt=0 gives
 * unformatted, =1 gives formatted */
CJSON_PUBLIC(char *)
cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
/* Render a cJSON entity to text using a buffer already allocated in memory with
 * given length. Returns 1 on success and 0 on failure. */
/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will
 * use, so to be safe allocate 5 bytes more than you actually need */
CJSON_PUBLIC(cJSON_bool)
cJSON_PrintPreallocated(cJSON *item,
                        char *buffer,
                        const int length,
                        const cJSON_bool format);
/* Delete a cJSON entity and all subentities. */
CJSON_PUBLIC(void) cJSON_Delete(cJSON *item);
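/* Illustrative sketch (editor's addition, not part of the vendored header):
 * the parse/print/delete lifecycle described by the memory-management note
 * above. The JSON sample is made up. */
#if 0 /* example only, never compiled */
#include <stdio.h>

static void cjson_roundtrip_example(void)
{
    cJSON *root = cJSON_Parse("{\"broker\":\"localhost:9092\"}");
    if (root == NULL) {
        /* cJSON_GetErrorPtr() (declared below) points near the failure */
        fprintf(stderr, "parse failed near: %s\n", cJSON_GetErrorPtr());
        return;
    }
    char *text = cJSON_Print(root); /* caller must free the printed buffer */
    if (text != NULL) {
        printf("%s\n", text);
        cJSON_free(text); /* pairs with the hooks set via cJSON_InitHooks */
    }
    cJSON_Delete(root); /* frees root and all subentities */
}
#endif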
/* Returns the number of items in an array (or object). */
CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array);
/* Retrieve item number "index" from array "array". Returns NULL if
 * unsuccessful. */
CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
/* Get item "string" from object. Case insensitive. */
CJSON_PUBLIC(cJSON *)
cJSON_GetObjectItem(const cJSON *const object, const char *const string);
CJSON_PUBLIC(cJSON *)
cJSON_GetObjectItemCaseSensitive(const cJSON *const object,
                                 const char *const string);
CJSON_PUBLIC(cJSON_bool)
cJSON_HasObjectItem(const cJSON *object, const char *string);
/* For analysing failed parses. This returns a pointer to the parse error.
 * You'll probably need to look a few chars back to make sense of it. Defined
 * when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);

/* Check item type and return its value */
CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item);
CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item);
/* These functions check the type of an item */
CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item);
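/* Illustrative sketch (editor's addition): the intended pattern for the
 * accessors above - test the type first, then read the value. The "port"
 * key is a made-up example. */
#if 0 /* example only, never compiled */
static double get_port_or_default(const cJSON *config, double fallback)
{
    const cJSON *port = cJSON_GetObjectItemCaseSensitive(config, "port");
    if (cJSON_IsNumber(port)) { /* the Is* checks are NULL-safe */
        return cJSON_GetNumberValue(port);
    }
    return fallback;
}
#endif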
/* These calls create a cJSON item of the appropriate type. */
CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void);
CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void);
CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void);
CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean);
CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num);
CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string);
/* raw json */
CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw);
CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void);
CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void);

/* Create a string where valuestring references a string so
 * it will not be freed by cJSON_Delete */
CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string);
/* Create an object/array that only references its elements so
 * they will not be freed by cJSON_Delete */
CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child);
CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child);
/* These utilities create an Array of count items.
 * The parameter count cannot be greater than the number of elements in the
 * number array, otherwise array access will be out of bounds.*/
CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count);
CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count);
CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count);
CJSON_PUBLIC(cJSON *)
cJSON_CreateStringArray(const char *const *strings, int count);
/* Append item to the specified array/object. */
CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item);
CJSON_PUBLIC(cJSON_bool)
cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
/* Use this when string is definitely const (i.e. a literal, or as good as), and
 * will definitely survive the cJSON object. WARNING: When this function was
 * used, make sure to always check that (item->type & cJSON_StringIsConst) is
 * zero before writing to `item->string` */
CJSON_PUBLIC(cJSON_bool)
cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
/* Append reference to item to the specified array/object. Use this when you
 * want to add an existing cJSON to a new cJSON, but don't want to corrupt your
 * existing cJSON. */
CJSON_PUBLIC(cJSON_bool)
cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
CJSON_PUBLIC(cJSON_bool)
cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);
/* Remove/Detach items from Arrays/Objects. */
CJSON_PUBLIC(cJSON *)
cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item);
CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
CJSON_PUBLIC(cJSON *)
cJSON_DetachItemFromObject(cJSON *object, const char *string);
CJSON_PUBLIC(cJSON *)
cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
CJSON_PUBLIC(void)
cJSON_DeleteItemFromObject(cJSON *object, const char *string);
CJSON_PUBLIC(void)
cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);
/* Update array items. */
CJSON_PUBLIC(cJSON_bool)
cJSON_InsertItemInArray(
    cJSON *array,
    int which,
    cJSON *newitem); /* Shifts pre-existing items to the right. */
CJSON_PUBLIC(cJSON_bool)
cJSON_ReplaceItemViaPointer(cJSON *const parent,
                            cJSON *const item,
                            cJSON *replacement);
CJSON_PUBLIC(cJSON_bool)
cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
CJSON_PUBLIC(cJSON_bool)
cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem);
CJSON_PUBLIC(cJSON_bool)
cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,
                                       const char *string,
                                       cJSON *newitem);
/* Duplicate a cJSON item */
CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
/* Duplicate will create a new, identical cJSON item to the one you pass, in new
 * memory that will need to be released. With recurse!=0, it will duplicate any
 * children connected to the item.
 * The item->next and ->prev pointers are always zero on return from Duplicate. */
/* Recursively compare two cJSON items for equality. If either a or b is NULL or
 * invalid, they will be considered unequal.
 * case_sensitive determines if object keys are treated case sensitive (1) or
 * case insensitive (0) */
CJSON_PUBLIC(cJSON_bool)
cJSON_Compare(const cJSON *const a,
              const cJSON *const b,
              const cJSON_bool case_sensitive);
/* Minify a string, removing blank characters (such as ' ', '\t', '\r', '\n')
 * from it. The input pointer json cannot point to a read-only address area,
 * such as a string constant, but should point to a readable and writable
 * address area. */
CJSON_PUBLIC(void) cJSON_Minify(char *json);
/* Helper functions for creating and adding items to an object at the same time.
 * They return the added item or NULL on failure. */
CJSON_PUBLIC(cJSON *)
cJSON_AddNullToObject(cJSON *const object, const char *const name);
CJSON_PUBLIC(cJSON *)
cJSON_AddTrueToObject(cJSON *const object, const char *const name);
CJSON_PUBLIC(cJSON *)
cJSON_AddFalseToObject(cJSON *const object, const char *const name);
CJSON_PUBLIC(cJSON *)
cJSON_AddBoolToObject(cJSON *const object,
                      const char *const name,
                      const cJSON_bool boolean);
CJSON_PUBLIC(cJSON *)
cJSON_AddNumberToObject(cJSON *const object,
                        const char *const name,
                        const double number);
CJSON_PUBLIC(cJSON *)
cJSON_AddStringToObject(cJSON *const object,
                        const char *const name,
                        const char *const string);
CJSON_PUBLIC(cJSON *)
cJSON_AddRawToObject(cJSON *const object,
                     const char *const name,
                     const char *const raw);
CJSON_PUBLIC(cJSON *)
cJSON_AddObjectToObject(cJSON *const object, const char *const name);
CJSON_PUBLIC(cJSON *)
cJSON_AddArrayToObject(cJSON *const object, const char *const name);
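/* Illustrative sketch (editor's addition): the Add*ToObject helpers above
 * combine creation and attachment and return the added item or NULL, so a
 * build can be rolled back with one cJSON_Delete(). Field names are made up. */
#if 0 /* example only, never compiled */
static cJSON *build_config_example(void)
{
    cJSON *cfg = cJSON_CreateObject();
    if (cfg == NULL) return NULL;
    if (cJSON_AddStringToObject(cfg, "host", "localhost") == NULL ||
        cJSON_AddNumberToObject(cfg, "port", 9092) == NULL ||
        cJSON_AddBoolToObject(cfg, "tls", 0) == NULL) {
        cJSON_Delete(cfg); /* also frees items already attached */
        return NULL;
    }
    return cfg;
}
#endif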
/* When assigning an integer value, it needs to be propagated to valuedouble
 * too. */
#define cJSON_SetIntValue(object, number) \
    ((object) ? (object)->valueint = (object)->valuedouble = (number) \
              : (number))
/* helper for the cJSON_SetNumberValue macro */
CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number);
#define cJSON_SetNumberValue(object, number) \
    ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) \
                      : (number))
/* Change the valuestring of a cJSON_String object, only takes effect when type
 * of object is cJSON_String */
CJSON_PUBLIC(char *)
cJSON_SetValuestring(cJSON *object, const char *valuestring);
/* Macro for iterating over an array or object */
#define cJSON_ArrayForEach(element, array) \
    for (element = (array != NULL) ? (array)->child : NULL; \
         element != NULL; element = element->next)
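/* Illustrative sketch (editor's addition): cJSON_ArrayForEach expands to the
 * child-chain walk defined above; it works on objects too, where each
 * element's ->string member holds the key. */
#if 0 /* example only, never compiled */
static int count_strings_example(const cJSON *array)
{
    int n = 0;
    cJSON *element = NULL;
    cJSON_ArrayForEach(element, array) {
        if (cJSON_IsString(element)) n++;
    }
    return n;
}
#endif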
/* malloc/free objects using the malloc/free functions that have been set with
 * cJSON_InitHooks */
CJSON_PUBLIC(void *) cJSON_malloc(size_t size);
CJSON_PUBLIC(void) cJSON_free(void *object);

#ifdef __cplusplus
}
#endif

#endif
@@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2017 Magnus Edenhill
 * Copyright (c) 2017-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
/*
 * KLZ4 - Fast LZ compression algorithm
 * Header File
 * Copyright (C) 2011-present, Yann Collet.
 * Copyright (C) 2011-2020, Yann Collet.

 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@ -97,36 +97,77 @@ extern "C" {
|
|||
# define KLZ4LIB_API KLZ4LIB_VISIBILITY
|
||||
#endif
|
||||
|
||||
/*! KLZ4_FREESTANDING :
|
||||
* When this macro is set to 1, it enables "freestanding mode" that is
|
||||
* suitable for typical freestanding environment which doesn't support
|
||||
* standard C library.
|
||||
*
|
||||
* - KLZ4_FREESTANDING is a compile-time switch.
|
||||
* - It requires the following macros to be defined:
|
||||
* KLZ4_memcpy, KLZ4_memmove, KLZ4_memset.
|
||||
* - It only enables KLZ4/HC functions which don't use heap.
|
||||
* All KLZ4F_* functions are not supported.
|
||||
* - See tests/freestanding.c to check its basic setup.
|
||||
*/
|
||||
#if defined(KLZ4_FREESTANDING) && (KLZ4_FREESTANDING == 1)
|
||||
# define KLZ4_HEAPMODE 0
|
||||
# define KLZ4HC_HEAPMODE 0
|
||||
# define KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1
|
||||
# if !defined(KLZ4_memcpy)
|
||||
# error "KLZ4_FREESTANDING requires macro 'KLZ4_memcpy'."
|
||||
# endif
|
||||
# if !defined(KLZ4_memset)
|
||||
# error "KLZ4_FREESTANDING requires macro 'KLZ4_memset'."
|
||||
# endif
|
||||
# if !defined(KLZ4_memmove)
|
||||
# error "KLZ4_FREESTANDING requires macro 'KLZ4_memmove'."
|
||||
# endif
|
||||
#elif ! defined(KLZ4_FREESTANDING)
|
||||
# define KLZ4_FREESTANDING 0
|
||||
#endif
|
||||
|
||||
|
||||
/*------ Version ------*/
#define KLZ4_VERSION_MAJOR 1 /* for breaking interface changes */
#define KLZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
#define KLZ4_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
#define KLZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */

#define KLZ4_VERSION_NUMBER (KLZ4_VERSION_MAJOR *100*100 + KLZ4_VERSION_MINOR *100 + KLZ4_VERSION_RELEASE)

#define KLZ4_LIB_VERSION KLZ4_VERSION_MAJOR.KLZ4_VERSION_MINOR.KLZ4_VERSION_RELEASE
#define KLZ4_QUOTE(str) #str
#define KLZ4_EXPAND_AND_QUOTE(str) KLZ4_QUOTE(str)
#define KLZ4_VERSION_STRING KLZ4_EXPAND_AND_QUOTE(KLZ4_LIB_VERSION)
#define KLZ4_VERSION_STRING KLZ4_EXPAND_AND_QUOTE(KLZ4_LIB_VERSION) /* requires v1.7.3+ */

KLZ4LIB_API int KLZ4_versionNumber (void); /**< library version number; useful to check dll version */
KLZ4LIB_API const char* KLZ4_versionString (void); /**< library version string; useful to check dll version */
KLZ4LIB_API int KLZ4_versionNumber (void); /**< library version number; useful to check dll version; requires v1.3.0+ */
KLZ4LIB_API const char* KLZ4_versionString (void); /**< library version string; useful to check dll version; requires v1.7.5+ */
/*-************************************
 * Tuning parameter
 **************************************/
#define KLZ4_MEMORY_USAGE_MIN 10
#define KLZ4_MEMORY_USAGE_DEFAULT 14
#define KLZ4_MEMORY_USAGE_MAX 20

/*!
 * KLZ4_MEMORY_USAGE :
 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
 * Increasing memory usage improves compression ratio.
 * Reduced memory usage may improve speed, thanks to better cache locality.
 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; )
 * Increasing memory usage improves compression ratio, at the cost of speed.
 * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality.
 * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
 */
#ifndef KLZ4_MEMORY_USAGE
# define KLZ4_MEMORY_USAGE 14
# define KLZ4_MEMORY_USAGE KLZ4_MEMORY_USAGE_DEFAULT
#endif

#if (KLZ4_MEMORY_USAGE < KLZ4_MEMORY_USAGE_MIN)
#  error "KLZ4_MEMORY_USAGE is too small !"
#endif

#if (KLZ4_MEMORY_USAGE > KLZ4_MEMORY_USAGE_MAX)
#  error "KLZ4_MEMORY_USAGE is too large !"
#endif

/*-************************************
 * Simple Functions
@@ -270,8 +311,25 @@ KLZ4LIB_API int KLZ4_decompress_safe_partial (const char* src, char* dst, int sr
 ***********************************************/
typedef union KLZ4_stream_u KLZ4_stream_t; /* incomplete type (defined later) */

/**
 Note about RC_INVOKED

 - RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio).
   https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros

 - Since rc.exe is a legacy compiler, it truncates long symbol (> 30 chars)
   and reports warning "RC4011: identifier truncated".

 - To eliminate the warning, we surround long preprocessor symbol with
   "#if !defined(RC_INVOKED) ... #endif" block that means
   "skip this block when rc.exe is trying to read it".
 */
#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
KLZ4LIB_API KLZ4_stream_t* KLZ4_createStream(void);
KLZ4LIB_API int KLZ4_freeStream (KLZ4_stream_t* streamPtr);
#endif /* !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
#endif

/*! KLZ4_resetStream_fast() : v1.9.0+
 * Use this to prepare an KLZ4_stream_t for a new chain of dependent blocks
@@ -355,8 +413,12 @@ typedef union KLZ4_streamDecode_u KLZ4_streamDecode_t; /* tracking context */
 * creation / destruction of streaming decompression tracking context.
 * A tracking context can be re-used multiple times.
 */
#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
KLZ4LIB_API KLZ4_streamDecode_t* KLZ4_createStreamDecode(void);
KLZ4LIB_API int KLZ4_freeStreamDecode (KLZ4_streamDecode_t* KLZ4_stream);
#endif /* !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
#endif

/*! KLZ4_setStreamDecode() :
 * An KLZ4_streamDecode_t context can be allocated once and re-used multiple times.
@@ -406,7 +468,10 @@ KLZ4LIB_API int KLZ4_decoderRingBufferSize(int maxBlockSize);
 * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
 * then indicate where this data is saved using KLZ4_setStreamDecode(), before decompressing next block.
 */
KLZ4LIB_API int KLZ4_decompress_safe_continue (KLZ4_streamDecode_t* KLZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity);
KLZ4LIB_API int
KLZ4_decompress_safe_continue (KLZ4_streamDecode_t* KLZ4_streamDecode,
                               const char* src, char* dst,
                               int srcSize, int dstCapacity);

/*! KLZ4_decompress_*_usingDict() :
@@ -417,7 +482,16 @@ KLZ4LIB_API int KLZ4_decompress_safe_continue (KLZ4_streamDecode_t* KLZ4_streamD
 * Performance tip : Decompression speed can be substantially increased
 * when dst == dictStart + dictSize.
 */
KLZ4LIB_API int KLZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize);
KLZ4LIB_API int
KLZ4_decompress_safe_usingDict(const char* src, char* dst,
                               int srcSize, int dstCapacity,
                               const char* dictStart, int dictSize);

KLZ4LIB_API int
KLZ4_decompress_safe_partial_usingDict(const char* src, char* dst,
                                       int compressedSize,
                                       int targetOutputSize, int maxOutputSize,
                                       const char* dictStart, int dictSize);

#endif /* KLZ4_H_2983827168210 */
@@ -496,13 +570,15 @@ KLZ4LIB_STATIC_API int KLZ4_compress_fast_extState_fastReset (void* state, const
 * stream (and source buffer) must remain in-place / accessible / unchanged
 * through the completion of the first compression call on the stream.
 */
KLZ4LIB_STATIC_API void KLZ4_attach_dictionary(KLZ4_stream_t* workingStream, const KLZ4_stream_t* dictionaryStream);
KLZ4LIB_STATIC_API void
KLZ4_attach_dictionary(KLZ4_stream_t* workingStream,
                       const KLZ4_stream_t* dictionaryStream);


/*! In-place compression and decompression
 *
 * It's possible to have input and output sharing the same buffer,
 * for highly contrained memory environments.
 * for highly constrained memory environments.
 * In both cases, it requires input to lay at the end of the buffer,
 * and decompression to start at beginning of the buffer.
 * Buffer size must feature some margin, hence be larger than final size.
@@ -592,38 +668,26 @@ KLZ4LIB_STATIC_API void KLZ4_attach_dictionary(KLZ4_stream_t* workingStream, con
typedef unsigned int KLZ4_u32;
#endif

/*! KLZ4_stream_t :
 * Never ever use below internal definitions directly !
 * These definitions are not API/ABI safe, and may change in future versions.
 * If you need static allocation, declare or allocate an KLZ4_stream_t object.
 **/

typedef struct KLZ4_stream_t_internal KLZ4_stream_t_internal;
struct KLZ4_stream_t_internal {
    KLZ4_u32 hashTable[KLZ4_HASH_SIZE_U32];
    KLZ4_u32 currentOffset;
    KLZ4_u32 tableType;
    const KLZ4_byte* dictionary;
    const KLZ4_stream_t_internal* dictCtx;
    KLZ4_u32 currentOffset;
    KLZ4_u32 tableType;
    KLZ4_u32 dictSize;
    /* Implicit padding to ensure structure is aligned */
};

typedef struct {
    const KLZ4_byte* externalDict;
    size_t extDictSize;
    const KLZ4_byte* prefixEnd;
    size_t prefixSize;
} KLZ4_streamDecode_t_internal;


/*! KLZ4_stream_t :
 * Do not use below internal definitions directly !
 * Declare or allocate an KLZ4_stream_t instead.
 * KLZ4_stream_t can also be created using KLZ4_createStream(), which is recommended.
 * The structure definition can be convenient for static allocation
 * (on stack, or as part of larger structure).
 * Init this structure with KLZ4_initStream() before first use.
 * note : only use this definition in association with static linking !
 *  this definition is not API/ABI safe, and may change in future versions.
 */
#define KLZ4_STREAMSIZE 16416 /* static size, for inter-version compatibility */
#define KLZ4_STREAMSIZE_VOIDP (KLZ4_STREAMSIZE / sizeof(void*))
#define KLZ4_STREAM_MINSIZE ((1UL << KLZ4_MEMORY_USAGE) + 32) /* static size, for inter-version compatibility */
union KLZ4_stream_u {
    void* table[KLZ4_STREAMSIZE_VOIDP];
    char minStateSize[KLZ4_STREAM_MINSIZE];
    KLZ4_stream_t_internal internal_donotuse;
}; /* previously typedef'd to KLZ4_stream_t */
@@ -641,21 +705,25 @@ union KLZ4_stream_u {
 * In which case, the function will @return NULL.
 * Note2: An KLZ4_stream_t structure guarantees correct alignment and size.
 * Note3: Before v1.9.0, use KLZ4_resetStream() instead
 */
 **/
KLZ4LIB_API KLZ4_stream_t* KLZ4_initStream (void* buffer, size_t size);


/*! KLZ4_streamDecode_t :
 * information structure to track an KLZ4 stream during decompression.
 * init this structure using KLZ4_setStreamDecode() before first use.
 * note : only use in association with static linking !
 *  this definition is not API/ABI safe,
 *  and may change in a future version !
 */
#define KLZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ )
#define KLZ4_STREAMDECODESIZE (KLZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
 * Never ever use below internal definitions directly !
 * These definitions are not API/ABI safe, and may change in future versions.
 * If you need static allocation, declare or allocate an KLZ4_streamDecode_t object.
 **/
typedef struct {
    const KLZ4_byte* externalDict;
    const KLZ4_byte* prefixEnd;
    size_t extDictSize;
    size_t prefixSize;
} KLZ4_streamDecode_t_internal;

#define KLZ4_STREAMDECODE_MINSIZE 32
union KLZ4_streamDecode_u {
    unsigned long long table[KLZ4_STREAMDECODESIZE_U64];
    char minStateSize[KLZ4_STREAMDECODE_MINSIZE];
    KLZ4_streamDecode_t_internal internal_donotuse;
} ; /* previously typedef'd to KLZ4_streamDecode_t */
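/* Illustrative sketch (editor's addition): static allocation as described in
 * the KLZ4_stream_t note above - declare the union, then initialize it with
 * KLZ4_initStream() before first use. The follow-up call named in the comment
 * is an assumption about the wider (not shown) streaming API. */
#if 0 /* example only, never compiled */
static void static_stream_example(void)
{
    KLZ4_stream_t state; /* on stack, no heap allocation */
    KLZ4_stream_t* const s = KLZ4_initStream(&state, sizeof(state));
    if (s == NULL) return; /* size/alignment was rejected */
    /* ... feed dependent blocks, e.g. via KLZ4_compress_fast_continue() ... */
}
#endif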
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
/*
   KLZ4 auto-framing library
   KLZ4F - KLZ4-Frame library
   Header File
   Copyright (C) 2011-2017, Yann Collet.
   Copyright (C) 2011-2020, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
@@ -39,7 +39,7 @@
 * KLZ4F also offers streaming capabilities.
 *
 * lz4.h is not required when using lz4frame.h,
 * except to extract common constant such as KLZ4_VERSION_NUMBER.
 * except to extract common constants such as KLZ4_VERSION_NUMBER.
 * */

#ifndef KLZ4F_H_09782039843
@ -54,12 +54,12 @@ extern "C" {
|
|||
|
||||
|
||||
/**
|
||||
Introduction
|
||||
|
||||
lz4frame.h implements KLZ4 frame specification (doc/lz4_Frame_format.md).
|
||||
lz4frame.h provides frame compression functions that take care
|
||||
of encoding standard metadata alongside KLZ4-compressed blocks.
|
||||
*/
|
||||
* Introduction
|
||||
*
|
||||
* lz4frame.h implements KLZ4 frame specification: see doc/lz4_Frame_format.md .
|
||||
* KLZ4 Frames are compatible with `lz4` CLI,
|
||||
* and designed to be interoperable with any system.
|
||||
**/
|
||||
|
||||
/*-***************************************************************
|
||||
* Compiler specifics
|
||||
|
|
@@ -210,7 +210,7 @@ KLZ4FLIB_API int KLZ4F_compressionLevel_max(void); /* v1.8.0+ */
 * Returns the maximum possible compressed size with KLZ4F_compressFrame() given srcSize and preferences.
 * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences.
 * Note : this result is only usable with KLZ4F_compressFrame().
 * It may also be used with KLZ4F_compressUpdate() _if no flush() operation_ is performed.
 * It may also be relevant to KLZ4F_compressUpdate() _only if_ no flush() operation is ever performed.
 */
KLZ4FLIB_API size_t KLZ4F_compressFrameBound(size_t srcSize, const KLZ4F_preferences_t* preferencesPtr);
@@ -230,7 +230,7 @@ KLZ4FLIB_API size_t KLZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
 * Advanced compression functions
 *************************************/
typedef struct KLZ4F_cctx_s KLZ4F_cctx; /* incomplete type */
typedef KLZ4F_cctx* KLZ4F_compressionContext_t; /* for compatibility with previous API version */
typedef KLZ4F_cctx* KLZ4F_compressionContext_t; /* for compatibility with older APIs, prefer using KLZ4F_cctx */

typedef struct {
    unsigned stableSrc; /* 1 == src content will remain present on future calls to KLZ4F_compress(); skip copying src content within tmp buffer */
@@ -243,20 +243,27 @@ typedef struct {
KLZ4FLIB_API unsigned KLZ4F_getVersion(void);

/*! KLZ4F_createCompressionContext() :
 * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
 * This is achieved using KLZ4F_createCompressionContext(), which takes as argument a version.
 * The version provided MUST be KLZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL.
 * The function will provide a pointer to a fully allocated KLZ4F_cctx object.
 * If @return != zero, there was an error during context creation.
 * Object can release its memory using KLZ4F_freeCompressionContext();
 */
 * The first thing to do is to create a compressionContext object,
 * which will keep track of operation state during streaming compression.
 * This is achieved using KLZ4F_createCompressionContext(), which takes as argument a version,
 * and a pointer to KLZ4F_cctx*, to write the resulting pointer into.
 * @version provided MUST be KLZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL.
 * The function provides a pointer to a fully allocated KLZ4F_cctx object.
 * @cctxPtr MUST be != NULL.
 * If @return != zero, context creation failed.
 * A created compression context can be employed multiple times for consecutive streaming operations.
 * Once all streaming compression jobs are completed,
 * the state object can be released using KLZ4F_freeCompressionContext().
 * Note1 : KLZ4F_freeCompressionContext() is always successful. Its return value can be ignored.
 * Note2 : KLZ4F_freeCompressionContext() works fine with NULL input pointers (do nothing).
 **/
KLZ4FLIB_API KLZ4F_errorCode_t KLZ4F_createCompressionContext(KLZ4F_cctx** cctxPtr, unsigned version);
KLZ4FLIB_API KLZ4F_errorCode_t KLZ4F_freeCompressionContext(KLZ4F_cctx* cctx);


/*---- Compression ----*/

#define KLZ4F_HEADER_SIZE_MIN 7 /* KLZ4 Frame header size can vary, depending on selected paramaters */
#define KLZ4F_HEADER_SIZE_MIN 7 /* KLZ4 Frame header size can vary, depending on selected parameters */
#define KLZ4F_HEADER_SIZE_MAX 19

/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */
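/* Illustrative sketch (editor's addition): the create/free contract described
 * above. KLZ4F_isError() is the error test referenced elsewhere in this
 * header; the begin/update/end calls in the comment stand in for the actual
 * streaming sequence. */
#if 0 /* example only, never compiled */
static int cctx_lifecycle_example(void)
{
    KLZ4F_cctx* cctx = NULL;
    KLZ4F_errorCode_t const err =
        KLZ4F_createCompressionContext(&cctx, KLZ4F_VERSION);
    if (KLZ4F_isError(err)) return -1; /* non-zero @return means failure */
    /* ... KLZ4F_compressBegin / KLZ4F_compressUpdate / KLZ4F_compressEnd ... */
    KLZ4F_freeCompressionContext(cctx); /* always succeeds, NULL-safe */
    return 0;
}
#endif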
@@ -301,8 +308,9 @@ KLZ4FLIB_API size_t KLZ4F_compressBound(size_t srcSize, const KLZ4F_preferences_
 * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations.
 * This value is provided by KLZ4F_compressBound().
 * If this condition is not respected, KLZ4F_compress() will fail (result is an errorCode).
 * KLZ4F_compressUpdate() doesn't guarantee error recovery.
 * When an error occurs, compression context must be freed or resized.
 * After an error, the state is left in a UB state, and must be re-initialized or freed.
 * If previously an uncompressed block was written, buffered data is flushed
 * before appending compressed data is continued.
 * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
 * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
 * or an error code if it fails (which can be tested using KLZ4F_isError())
@@ -347,8 +355,12 @@ typedef struct KLZ4F_dctx_s KLZ4F_dctx; /* incomplete type */
typedef KLZ4F_dctx* KLZ4F_decompressionContext_t; /* compatibility with previous API versions */

typedef struct {
    unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified. This optimization skips storage operations in tmp buffers. */
    unsigned reserved[3]; /* must be set to zero for forward compatibility */
    unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified between invocations.
                         * This optimization skips storage operations in tmp buffers. */
    unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time.
                             * Setting this option to 1 once disables all checksums for the rest of the frame. */
    unsigned reserved1; /* must be set to zero for forward compatibility */
    unsigned reserved0; /* idem */
} KLZ4F_decompressOptions_t;
@@ -356,9 +368,10 @@ typedef struct {

/*! KLZ4F_createDecompressionContext() :
 * Create an KLZ4F_dctx object, to track all decompression operations.
 * The version provided MUST be KLZ4F_VERSION.
 * The function provides a pointer to an allocated and initialized KLZ4F_dctx object.
 * The result is an errorCode, which can be tested using KLZ4F_isError().
 * @version provided MUST be KLZ4F_VERSION.
 * @dctxPtr MUST be valid.
 * The function fills @dctxPtr with the value of a pointer to an allocated and initialized KLZ4F_dctx object.
 * The @return is an errorCode, which can be tested using KLZ4F_isError().
 * dctx memory can be released using KLZ4F_freeDecompressionContext();
 * Result of KLZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released.
 * That is, it should be == 0 if decompression has been completed fully and correctly.
@@ -371,6 +384,8 @@ KLZ4FLIB_API KLZ4F_errorCode_t KLZ4F_freeDecompressionContext(KLZ4F_dctx* dctx);
 * Streaming decompression functions
 *************************************/

#define KLZ4F_MAGICNUMBER 0x184D2204U
#define KLZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
#define KLZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5

/*! KLZ4F_headerSize() : v1.9.0+
@@ -386,7 +401,7 @@ KLZ4FLIB_API size_t KLZ4F_headerSize(const void* src, size_t srcSize);

/*! KLZ4F_getFrameInfo() :
 * This function extracts frame parameters (max blockSize, dictID, etc.).
 * Its usage is optional: user can call KLZ4F_decompress() directly.
 * Its usage is optional: user can also invoke KLZ4F_decompress() directly.
 *
 * Extracted information will fill an existing KLZ4F_frameInfo_t structure.
 * This can be useful for allocation and dictionary identification purposes.
@@ -427,7 +442,8 @@ KLZ4FLIB_API size_t KLZ4F_headerSize(const void* src, size_t srcSize);
 * note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely.
 * note 2 : frame parameters are *copied into* an already allocated KLZ4F_frameInfo_t structure.
 */
KLZ4FLIB_API size_t KLZ4F_getFrameInfo(KLZ4F_dctx* dctx,
KLZ4FLIB_API size_t
KLZ4F_getFrameInfo(KLZ4F_dctx* dctx,
                   KLZ4F_frameInfo_t* frameInfoPtr,
                   const void* srcBuffer, size_t* srcSizePtr);
@@ -462,7 +478,8 @@ KLZ4FLIB_API size_t KLZ4F_getFrameInfo(KLZ4F_dctx* dctx,
 *
 * After a frame is fully decoded, dctx can be used again to decompress another frame.
 */
KLZ4FLIB_API size_t KLZ4F_decompress(KLZ4F_dctx* dctx,
KLZ4FLIB_API size_t
KLZ4F_decompress(KLZ4F_dctx* dctx,
                 void* dstBuffer, size_t* dstSizePtr,
                 const void* srcBuffer, size_t* srcSizePtr,
                 const KLZ4F_decompressOptions_t* dOptPtr);
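/* Illustrative sketch (editor's addition): the usual KLZ4F_decompress() loop -
 * feed input until the function returns 0 (frame fully decoded). Buffer
 * handling is simplified; a real caller would drain dst between calls. */
#if 0 /* example only, never compiled */
static int decompress_frame_example(KLZ4F_dctx* dctx,
                                    const char* src, size_t srcSize,
                                    char* dst, size_t dstCapacity)
{
    size_t consumed = 0;
    while (consumed < srcSize) {
        size_t dstSize = dstCapacity;         /* in: capacity, out: produced */
        size_t srcChunk = srcSize - consumed; /* in: available, out: read */
        size_t const hint = KLZ4F_decompress(dctx, dst, &dstSize,
                                             src + consumed, &srcChunk, NULL);
        if (KLZ4F_isError(hint)) return -1;
        consumed += srcChunk;
        /* ... dstSize bytes of decoded data are now in dst ... */
        if (hint == 0) break; /* frame complete; dctx reusable for the next */
    }
    return 0;
}
#endif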
@ -529,6 +546,8 @@ extern "C" {
|
|||
ITEM(ERROR_headerChecksum_invalid) \
|
||||
ITEM(ERROR_contentChecksum_invalid) \
|
||||
ITEM(ERROR_frameDecoding_alreadyStarted) \
|
||||
ITEM(ERROR_compressionState_uninitialized) \
|
||||
ITEM(ERROR_parameter_null) \
|
||||
ITEM(ERROR_maxCode)
|
||||
|
||||
#define KLZ4F_GENERATE_ENUM(ENUM) KLZ4F_##ENUM,
|
||||
|
|
@@ -539,7 +558,31 @@ typedef enum { KLZ4F_LIST_ERRORS(KLZ4F_GENERATE_ENUM)

KLZ4FLIB_STATIC_API KLZ4F_errorCodes KLZ4F_getErrorCode(size_t functionResult);

KLZ4FLIB_STATIC_API size_t KLZ4F_getBlockSize(unsigned);

/*! KLZ4F_getBlockSize() :
 * Return, in scalar format (size_t),
 * the maximum block size associated with blockSizeID.
 **/
KLZ4FLIB_STATIC_API size_t KLZ4F_getBlockSize(KLZ4F_blockSizeID_t blockSizeID);

/*! KLZ4F_uncompressedUpdate() :
 * KLZ4F_uncompressedUpdate() can be called repetitively to add as much uncompressed data as necessary.
 * Important rule: dstCapacity MUST be large enough to store the entire source buffer as
 * no compression is done for this operation
 * If this condition is not respected, KLZ4F_uncompressedUpdate() will fail (result is an errorCode).
 * After an error, the state is left in a UB state, and must be re-initialized or freed.
 * If previously a compressed block was written, buffered data is flushed
 * before appending uncompressed data is continued.
 * This is only supported when KLZ4F_blockIndependent is used
 * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
 * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
 * or an error code if it fails (which can be tested using KLZ4F_isError())
 */
KLZ4FLIB_STATIC_API size_t
KLZ4F_uncompressedUpdate(KLZ4F_cctx* cctx,
                         void* dstBuffer, size_t dstCapacity,
                         const void* srcBuffer, size_t srcSize,
                         const KLZ4F_compressOptions_t* cOptPtr);

/**********************************
 * Bulk processing dictionary API
@@ -583,8 +626,8 @@ KLZ4FLIB_STATIC_API void KLZ4F_freeCDict(KLZ4F_CDict* CDict);
 * but it's not recommended, as it's the only way to provide dictID in the frame header.
 * @return : number of bytes written into dstBuffer.
 * or an error code if it fails (can be tested using KLZ4F_isError()) */
KLZ4FLIB_STATIC_API size_t KLZ4F_compressFrame_usingCDict(
    KLZ4F_cctx* cctx,
KLZ4FLIB_STATIC_API size_t
KLZ4F_compressFrame_usingCDict(KLZ4F_cctx* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const KLZ4F_CDict* cdict,
@@ -598,8 +641,8 @@ KLZ4FLIB_STATIC_API size_t KLZ4F_compressFrame_usingCDict(
 * however, it's the only way to provide dictID in the frame header.
 * @return : number of bytes written into dstBuffer for the header,
 * or an error code (which can be tested using KLZ4F_isError()) */
KLZ4FLIB_STATIC_API size_t KLZ4F_compressBegin_usingCDict(
    KLZ4F_cctx* cctx,
KLZ4FLIB_STATIC_API size_t
KLZ4F_compressBegin_usingCDict(KLZ4F_cctx* cctx,
                               void* dstBuffer, size_t dstCapacity,
                               const KLZ4F_CDict* cdict,
                               const KLZ4F_preferences_t* prefsPtr);
@@ -608,14 +651,40 @@ KLZ4FLIB_STATIC_API size_t KLZ4F_compressBegin_usingCDict(
/*! KLZ4F_decompress_usingDict() :
 * Same as KLZ4F_decompress(), using a predefined dictionary.
 * Dictionary is used "in place", without any preprocessing.
 * It must remain accessible throughout the entire frame decoding. */
KLZ4FLIB_STATIC_API size_t KLZ4F_decompress_usingDict(
    KLZ4F_dctx* dctxPtr,
** It must remain accessible throughout the entire frame decoding. */
KLZ4FLIB_STATIC_API size_t
KLZ4F_decompress_usingDict(KLZ4F_dctx* dctxPtr,
                           void* dstBuffer, size_t* dstSizePtr,
                           const void* srcBuffer, size_t* srcSizePtr,
                           const void* dict, size_t dictSize,
                           const KLZ4F_decompressOptions_t* decompressOptionsPtr);


/*! Custom memory allocation :
 * These prototypes make it possible to pass custom allocation/free functions.
 * KLZ4F_customMem is provided at state creation time, using KLZ4F_create*_advanced() listed below.
 * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
 */
typedef void* (*KLZ4F_AllocFunction) (void* opaqueState, size_t size);
typedef void* (*KLZ4F_CallocFunction) (void* opaqueState, size_t size);
typedef void  (*KLZ4F_FreeFunction) (void* opaqueState, void* address);
typedef struct {
    KLZ4F_AllocFunction customAlloc;
    KLZ4F_CallocFunction customCalloc; /* optional; when not defined, uses customAlloc + memset */
    KLZ4F_FreeFunction customFree;
    void* opaqueState;
} KLZ4F_CustomMem;
static
#ifdef __GNUC__
__attribute__((__unused__))
#endif
KLZ4F_CustomMem const KLZ4F_defaultCMem = { NULL, NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */

KLZ4FLIB_STATIC_API KLZ4F_cctx* KLZ4F_createCompressionContext_advanced(KLZ4F_CustomMem customMem, unsigned version);
KLZ4FLIB_STATIC_API KLZ4F_dctx* KLZ4F_createDecompressionContext_advanced(KLZ4F_CustomMem customMem, unsigned version);
KLZ4FLIB_STATIC_API KLZ4F_CDict* KLZ4F_createCDict_advanced(KLZ4F_CustomMem customMem, const void* dictBuffer, size_t dictSize);
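/* Illustrative sketch (editor's addition): wiring KLZ4F_CustomMem to thin
 * malloc/free wrappers; the allocation counter is a made-up illustration of
 * the opaqueState parameter. */
#if 0 /* example only, never compiled */
#include <stdlib.h>

static void* counting_alloc(void* opaqueState, size_t size)
{
    ++*(size_t*)opaqueState; /* track how often the library allocates */
    return malloc(size);
}

static void counting_free(void* opaqueState, void* address)
{
    (void)opaqueState;
    free(address);
}

static KLZ4F_cctx* make_counted_cctx(size_t* counter)
{
    KLZ4F_CustomMem const mem = {
        counting_alloc,
        NULL, /* customCalloc optional: falls back to customAlloc + memset */
        counting_free,
        counter
    };
    return KLZ4F_createCompressionContext_advanced(mem, KLZ4F_VERSION);
}
#endif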
#if defined (__cplusplus)
}
#endif
@@ -1,7 +1,7 @@
/*
   KLZ4 auto-framing library
   Header File for static linking only
   Copyright (C) 2011-2016, Yann Collet.
   Copyright (C) 2011-2020, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -1,6 +1,6 @@
/*
   KLZ4 HC - High Compression Mode of KLZ4
   Copyright (C) 2011-2017, Yann Collet.
   Copyright (C) 2011-2020, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -42,7 +42,7 @@
 * Select how default compression function will allocate workplace memory,
 * in stack (0:fastest), or in heap (1:requires malloc()).
 * Since workplace is rather large, heap mode is recommended.
 */
 **/
#ifndef KLZ4HC_HEAPMODE
# define KLZ4HC_HEAPMODE 1
#endif
@@ -99,18 +99,20 @@ static void KLZ4HC_clearTables (KLZ4HC_CCtx_internal* hc4)

static void KLZ4HC_init_internal (KLZ4HC_CCtx_internal* hc4, const BYTE* start)
{
    uptrval startingOffset = (uptrval)(hc4->end - hc4->base);
    if (startingOffset > 1 GB) {
    size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart);
    size_t newStartingOffset = bufferSize + hc4->dictLimit;
    assert(newStartingOffset >= bufferSize); /* check overflow */
    if (newStartingOffset > 1 GB) {
        KLZ4HC_clearTables(hc4);
        startingOffset = 0;
        newStartingOffset = 0;
    }
    startingOffset += 64 KB;
    hc4->nextToUpdate = (U32) startingOffset;
    hc4->base = start - startingOffset;
    newStartingOffset += 64 KB;
    hc4->nextToUpdate = (U32)newStartingOffset;
    hc4->prefixStart = start;
    hc4->end = start;
    hc4->dictBase = start - startingOffset;
    hc4->dictLimit = (U32) startingOffset;
    hc4->lowLimit = (U32) startingOffset;
    hc4->dictStart = start;
    hc4->dictLimit = (U32)newStartingOffset;
    hc4->lowLimit = (U32)newStartingOffset;
}

@@ -119,12 +121,15 @@ KLZ4_FORCE_INLINE void KLZ4HC_Insert (KLZ4HC_CCtx_internal* hc4, const BYTE* ip)
{
    U16* const chainTable = hc4->chainTable;
    U32* const hashTable = hc4->hashTable;
    const BYTE* const base = hc4->base;
    U32 const target = (U32)(ip - base);
    const BYTE* const prefixPtr = hc4->prefixStart;
    U32 const prefixIdx = hc4->dictLimit;
    U32 const target = (U32)(ip - prefixPtr) + prefixIdx;
    U32 idx = hc4->nextToUpdate;
    assert(ip >= prefixPtr);
    assert(target >= prefixIdx);

    while (idx < target) {
        U32 const h = KLZ4HC_hashPtr(base+idx);
        U32 const h = KLZ4HC_hashPtr(prefixPtr+idx-prefixIdx);
        size_t delta = idx - hashTable[h];
        if (delta>KLZ4_DISTANCE_MAX) delta = KLZ4_DISTANCE_MAX;
        DELTANEXTU16(chainTable, idx) = (U16)delta;
|
@ -193,15 +198,14 @@ KLZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
|
|||
BYTE const byte = (BYTE)(pattern >> bitOffset);
|
||||
if (*ip != byte) break;
|
||||
ip ++; bitOffset -= 8;
|
||||
}
|
||||
}
|
||||
} }
|
||||
|
||||
return (unsigned)(ip - iStart);
|
||||
}
|
||||
|
||||
/* KLZ4HC_reverseCountPattern() :
|
||||
* pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
|
||||
* read using natural platform endianess */
|
||||
* read using natural platform endianness */
|
||||
static unsigned
|
||||
KLZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
|
||||
{
|
||||
|
|
@@ -211,7 +215,7 @@ KLZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
        if (KLZ4_read32(ip-4) != pattern) break;
        ip -= 4;
    }
    { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianess */
    { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */
        while (likely(ip>iLow)) {
            if (ip[-1] != *bytePtr) break;
            ip--; bytePtr--;
@@ -234,28 +238,28 @@ typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;

KLZ4_FORCE_INLINE int
KLZ4HC_InsertAndGetWiderMatch (
    KLZ4HC_CCtx_internal* hc4,
    KLZ4HC_CCtx_internal* const hc4,
    const BYTE* const ip,
    const BYTE* const iLowLimit,
    const BYTE* const iHighLimit,
    const BYTE* const iLowLimit, const BYTE* const iHighLimit,
    int longest,
    const BYTE** matchpos,
    const BYTE** startpos,
    const int maxNbAttempts,
    const int patternAnalysis,
    const int chainSwap,
    const int patternAnalysis, const int chainSwap,
    const dictCtx_directive dict,
    const HCfavor_e favorDecSpeed)
{
    U16* const chainTable = hc4->chainTable;
    U32* const HashTable = hc4->hashTable;
    const KLZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
    const BYTE* const base = hc4->base;
    const U32 dictLimit = hc4->dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const U32 ipIndex = (U32)(ip - base);
    const U32 lowestMatchIndex = (hc4->lowLimit + (KLZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - KLZ4_DISTANCE_MAX;
    const BYTE* const dictBase = hc4->dictBase;
    const BYTE* const prefixPtr = hc4->prefixStart;
    const U32 prefixIdx = hc4->dictLimit;
    const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
    const int withinStartDistance = (hc4->lowLimit + (KLZ4_DISTANCE_MAX + 1) > ipIndex);
    const U32 lowestMatchIndex = (withinStartDistance) ? hc4->lowLimit : ipIndex - KLZ4_DISTANCE_MAX;
    const BYTE* const dictStart = hc4->dictStart;
    const U32 dictIdx = hc4->lowLimit;
    const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx;
    int const lookBackLength = (int)(ip-iLowLimit);
    int nbAttempts = maxNbAttempts;
    U32 matchChainPos = 0;
@@ -277,14 +281,13 @@ KLZ4HC_InsertAndGetWiderMatch (
        assert(matchIndex < ipIndex);
        if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
            /* do nothing */
        } else if (matchIndex >= dictLimit) {   /* within current Prefix */
            const BYTE* const matchPtr = base + matchIndex;
            assert(matchPtr >= lowPrefixPtr);
        } else if (matchIndex >= prefixIdx) {   /* within current Prefix */
            const BYTE* const matchPtr = prefixPtr + matchIndex - prefixIdx;
            assert(matchPtr < ip);
            assert(longest >= 1);
            if (KLZ4_read16(iLowLimit + longest - 1) == KLZ4_read16(matchPtr - lookBackLength + longest - 1)) {
                if (KLZ4_read32(matchPtr) == pattern) {
                    int const back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr) : 0;
                    int const back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0;
                    matchLength = MINMATCH + (int)KLZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
                    matchLength -= back;
                    if (matchLength > longest) {
@@ -293,20 +296,21 @@ KLZ4HC_InsertAndGetWiderMatch (
                        *startpos = ip + back;
        }   }   }
        } else {   /* lowestMatchIndex <= matchIndex < dictLimit */
            const BYTE* const matchPtr = dictBase + matchIndex;
            if (KLZ4_read32(matchPtr) == pattern) {
                const BYTE* const dictStart = dictBase + hc4->lowLimit;
            const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx);
            assert(matchIndex >= dictIdx);
            if ( likely(matchIndex <= prefixIdx - 4)
              && (KLZ4_read32(matchPtr) == pattern) ) {
                int back = 0;
                const BYTE* vLimit = ip + (dictLimit - matchIndex);
                const BYTE* vLimit = ip + (prefixIdx - matchIndex);
                if (vLimit > iHighLimit) vLimit = iHighLimit;
                matchLength = (int)KLZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
                    matchLength += KLZ4_count(ip+matchLength, lowPrefixPtr, iHighLimit);
                    matchLength += KLZ4_count(ip+matchLength, prefixPtr, iHighLimit);
                back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
                matchLength -= back;
                if (matchLength > longest) {
                    longest = matchLength;
                    *matchpos = base + matchIndex + back;   /* virtual pos, relative to ip, to retrieve offset */
                    *matchpos = prefixPtr - prefixIdx + matchIndex + back;   /* virtual pos, relative to ip, to retrieve offset */
                    *startpos = ip + back;
        }   }   }
@@ -326,8 +330,7 @@ KLZ4HC_InsertAndGetWiderMatch (
                    distanceToNextMatch = candidateDist;
                    matchChainPos = (U32)pos;
                    accel = 1 << kTrigger;
                }
            }
        }   }
        if (distanceToNextMatch > 1) {
            if (distanceToNextMatch > matchIndex) break;   /* avoid overflow */
            matchIndex -= distanceToNextMatch;
@@ -347,23 +350,24 @@ KLZ4HC_InsertAndGetWiderMatch (
                repeat = rep_not;
        }   }
        if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
          && KLZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
            const int extDict = matchCandidateIdx < dictLimit;
            const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
          && KLZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) {
            const int extDict = matchCandidateIdx < prefixIdx;
            const BYTE* const matchPtr = (extDict ? dictStart - dictIdx : prefixPtr - prefixIdx) + matchCandidateIdx;
            if (KLZ4_read32(matchPtr) == pattern) {  /* good candidate */
                const BYTE* const dictStart = dictBase + hc4->lowLimit;
                const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
                const BYTE* const iLimit = extDict ? dictEnd : iHighLimit;
                size_t forwardPatternLength = KLZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
                if (extDict && matchPtr + forwardPatternLength == iLimit) {
                    U32 const rotatedPattern = KLZ4HC_rotatePattern(forwardPatternLength, pattern);
                    forwardPatternLength += KLZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
                    forwardPatternLength += KLZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern);
                }
                { const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
                { const BYTE* const lowestMatchPtr = extDict ? dictStart : prefixPtr;
                  size_t backLength = KLZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
                  size_t currentSegmentLength;
                  if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
                  if (!extDict
                    && matchPtr - backLength == prefixPtr
                    && dictIdx < prefixIdx) {
                      U32 const rotatedPattern = KLZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
                      backLength += KLZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
                      backLength += KLZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern);
                  }
                  /* Limit backLength not go further than lowestMatchIndex */
                  backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
@@ -373,28 +377,28 @@ KLZ4HC_InsertAndGetWiderMatch (
                  if ( (currentSegmentLength >= srcPatternLength)   /* current pattern segment large enough to contain full srcPatternLength */
                    && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
                      U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength;  /* best position, full pattern, might be followed by more match */
                      if (KLZ4HC_protectDictEnd(dictLimit, newMatchIndex))
                      if (KLZ4HC_protectDictEnd(prefixIdx, newMatchIndex))
                          matchIndex = newMatchIndex;
                      else {
                          /* Can only happen if started in the prefix */
                          assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
                          matchIndex = dictLimit;
                          assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
                          matchIndex = prefixIdx;
                      }
                  } else {
                      U32 const newMatchIndex = matchCandidateIdx - (U32)backLength;   /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
                      if (!KLZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
                          assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
                          matchIndex = dictLimit;
                      if (!KLZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) {
                          assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
                          matchIndex = prefixIdx;
                      } else {
                          matchIndex = newMatchIndex;
                          if (lookBackLength==0) {  /* no back possible */
                              size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
                              if ((size_t)longest < maxML) {
                                  assert(base + matchIndex != ip);
                                  if ((size_t)(ip - base) - matchIndex > KLZ4_DISTANCE_MAX) break;
                                  assert(prefixPtr - prefixIdx + matchIndex != ip);
                                  if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > KLZ4_DISTANCE_MAX) break;
                                  assert(maxML < 2 GB);
                                  longest = (int)maxML;
                                  *matchpos = base + matchIndex;   /* virtual pos, relative to ip, to retrieve offset */
                                  *matchpos = prefixPtr - prefixIdx + matchIndex;   /* virtual pos, relative to ip, to retrieve offset */
                                  *startpos = ip;
                              }
                              { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
@@ -413,12 +417,12 @@ KLZ4HC_InsertAndGetWiderMatch (
    if ( dict == usingDictCtxHc
      && nbAttempts > 0
      && ipIndex - lowestMatchIndex < KLZ4_DISTANCE_MAX) {
        size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
        size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
        U32 dictMatchIndex = dictCtx->hashTable[KLZ4HC_hashPtr(ip)];
        assert(dictEndOffset <= 1 GB);
        matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
        while (ipIndex - matchIndex <= KLZ4_DISTANCE_MAX && nbAttempts--) {
            const BYTE* const matchPtr = dictCtx->base + dictMatchIndex;
            const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex;

            if (KLZ4_read32(matchPtr) == pattern) {
                int mlt;
@@ -426,11 +430,11 @@ KLZ4HC_InsertAndGetWiderMatch (
                const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
                if (vLimit > iHighLimit) vLimit = iHighLimit;
                mlt = (int)KLZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0;
                back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
                mlt -= back;
                if (mlt > longest) {
                    longest = mlt;
                    *matchpos = base + matchIndex + back;
                    *matchpos = prefixPtr - prefixIdx + matchIndex + back;
                    *startpos = ip + back;
                }   }
@ -442,8 +446,8 @@ KLZ4HC_InsertAndGetWiderMatch (
|
|||
return longest;
|
||||
}
|
||||
|
||||
KLZ4_FORCE_INLINE
|
||||
int KLZ4HC_InsertAndFindBestMatch(KLZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
|
||||
KLZ4_FORCE_INLINE int
|
||||
KLZ4HC_InsertAndFindBestMatch(KLZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
|
||||
const BYTE* const ip, const BYTE* const iLimit,
|
||||
const BYTE** matchpos,
|
||||
const int maxNbAttempts,
|
||||
|
|
@ -751,7 +755,7 @@ _last_literals:
|
|||
} else {
|
||||
*op++ = (BYTE)(lastRunSize << ML_BITS);
|
||||
}
|
||||
memcpy(op, anchor, lastRunSize);
|
||||
KLZ4_memcpy(op, anchor, lastRunSize);
|
||||
op += lastRunSize;
|
||||
}
|
||||
|
||||
|
|
@ -884,13 +888,13 @@ KLZ4HC_compress_generic_dictCtx (
|
|||
limitedOutput_directive limit
|
||||
)
|
||||
{
|
||||
const size_t position = (size_t)(ctx->end - ctx->base) - ctx->lowLimit;
|
||||
const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit);
|
||||
assert(ctx->dictCtx != NULL);
|
||||
if (position >= 64 KB) {
|
||||
ctx->dictCtx = NULL;
|
||||
return KLZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
|
||||
} else if (position == 0 && *srcSizePtr > 4 KB) {
|
||||
memcpy(ctx, ctx->dictCtx, sizeof(KLZ4HC_CCtx_internal));
|
||||
KLZ4_memcpy(ctx, ctx->dictCtx, sizeof(KLZ4HC_CCtx_internal));
|
||||
KLZ4HC_setExternalDict(ctx, (const BYTE *)src);
|
||||
ctx->compressionLevel = (short)cLevel;
|
||||
return KLZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
|
||||
|
|
@ -953,13 +957,15 @@ int KLZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int sr
|
|||
|
||||
int KLZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
|
||||
{
|
||||
int cSize;
|
||||
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
|
||||
KLZ4_streamHC_t* const statePtr = (KLZ4_streamHC_t*)ALLOC(sizeof(KLZ4_streamHC_t));
|
||||
if (statePtr==NULL) return 0;
|
||||
#else
|
||||
KLZ4_streamHC_t state;
|
||||
KLZ4_streamHC_t* const statePtr = &state;
|
||||
#endif
|
||||
int const cSize = KLZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
|
||||
cSize = KLZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
|
||||
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
|
||||
FREEMEM(statePtr);
|
||||
#endif
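KLZ4_compress_HC above is the one-shot entry point of this vendored copy (the K prefix is this tree's renaming of the upstream LZ4 symbols). A minimal usage sketch, written against the upstream names and headers as an assumption about the caller's build:

    #include <stdio.h>
    #include <stdlib.h>
    #include "lz4.h"                /* LZ4_compressBound() */
    #include "lz4hc.h"              /* LZ4_compress_HC(), LZ4HC_CLEVEL_DEFAULT */

    int main(void)
    {
        const char src[] = "yada yada yada yada yada yada yada";
        int const srcSize = (int)sizeof(src);
        int const dstCapacity = LZ4_compressBound(srcSize);  /* worst-case compressed size */
        char* const dst = (char*)malloc((size_t)dstCapacity);
        if (dst == NULL) return 1;

        /* Higher levels trade speed for ratio; a return value of 0 means failure. */
        int const cSize = LZ4_compress_HC(src, dst, srcSize, dstCapacity, LZ4HC_CLEVEL_DEFAULT);
        printf("%d -> %d bytes\n", srcSize, cSize);
        free(dst);
        return cSize > 0 ? 0 : 1;
    }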
@@ -982,6 +988,7 @@ int KLZ4_compress_HC_destSize(void* state, const char* source, char* dest, int*
*  Streaming Functions
**************************************/
/* allocation */
#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
KLZ4_streamHC_t* KLZ4_createStreamHC(void)
{
KLZ4_streamHC_t* const state =

@@ -998,13 +1005,12 @@ int KLZ4_freeStreamHC (KLZ4_streamHC_t* KLZ4_streamHCPtr)
FREEMEM(KLZ4_streamHCPtr);
return 0;
}
#endif


KLZ4_streamHC_t* KLZ4_initStreamHC (void* buffer, size_t size)
{
KLZ4_streamHC_t* const KLZ4_streamHCPtr = (KLZ4_streamHC_t*)buffer;
/* if compilation fails here, KLZ4_STREAMHCSIZE must be increased */
KLZ4_STATIC_ASSERT(sizeof(KLZ4HC_CCtx_internal) <= KLZ4_STREAMHCSIZE);
DEBUGLOG(4, "KLZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
/* check conditions */
if (buffer == NULL) return NULL;

@@ -1030,9 +1036,13 @@ void KLZ4_resetStreamHC_fast (KLZ4_streamHC_t* KLZ4_streamHCPtr, int compression
if (KLZ4_streamHCPtr->internal_donotuse.dirty) {
KLZ4_initStreamHC(KLZ4_streamHCPtr, sizeof(*KLZ4_streamHCPtr));
} else {
/* preserve end - base : can trigger clearTable's threshold */
KLZ4_streamHCPtr->internal_donotuse.end -= (uptrval)KLZ4_streamHCPtr->internal_donotuse.base;
KLZ4_streamHCPtr->internal_donotuse.base = NULL;
/* preserve end - prefixStart : can trigger clearTable's threshold */
if (KLZ4_streamHCPtr->internal_donotuse.end != NULL) {
KLZ4_streamHCPtr->internal_donotuse.end -= (uptrval)KLZ4_streamHCPtr->internal_donotuse.prefixStart;
} else {
assert(KLZ4_streamHCPtr->internal_donotuse.prefixStart == NULL);
}
KLZ4_streamHCPtr->internal_donotuse.prefixStart = NULL;
KLZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
}
KLZ4_setCompressionLevel(KLZ4_streamHCPtr, compressionLevel);

@@ -1083,14 +1093,14 @@ void KLZ4_attach_HC_dictionary(KLZ4_streamHC_t *working_stream, const KLZ4_strea
static void KLZ4HC_setExternalDict(KLZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
DEBUGLOG(4, "KLZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4)
if (ctxPtr->end >= ctxPtr->prefixStart + 4)
KLZ4HC_Insert (ctxPtr, ctxPtr->end-3);   /* Referencing remaining dictionary content */

/* Only one memory segment for extDict, so any previous extDict is lost at this stage */
ctxPtr->lowLimit = ctxPtr->dictLimit;
ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
ctxPtr->dictBase = ctxPtr->base;
ctxPtr->base = newBlock - ctxPtr->dictLimit;
ctxPtr->dictStart = ctxPtr->prefixStart;
ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart);
ctxPtr->prefixStart = newBlock;
ctxPtr->end = newBlock;
ctxPtr->nextToUpdate = ctxPtr->dictLimit;   /* match referencing will resume from there */

@@ -1109,11 +1119,11 @@ KLZ4_compressHC_continue_generic (KLZ4_streamHC_t* KLZ4_streamHCPtr,
KLZ4_streamHCPtr, src, *srcSizePtr, limit);
assert(ctxPtr != NULL);
/* auto-init if forgotten */
if (ctxPtr->base == NULL) KLZ4HC_init_internal (ctxPtr, (const BYTE*) src);
if (ctxPtr->prefixStart == NULL) KLZ4HC_init_internal (ctxPtr, (const BYTE*) src);

/* Check overflow */
if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) {
size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart);
if (dictSize > 64 KB) dictSize = 64 KB;
KLZ4_loadDictHC(KLZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
}

@@ -1124,13 +1134,16 @@ KLZ4_compressHC_continue_generic (KLZ4_streamHC_t* KLZ4_streamHCPtr,

/* Check overlapping input/dictionary space */
{   const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
const BYTE* const dictEnd   = ctxPtr->dictBase + ctxPtr->dictLimit;
const BYTE* const dictBegin = ctxPtr->dictStart;
const BYTE* const dictEnd   = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit);
if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
if (sourceEnd > dictEnd) sourceEnd = dictEnd;
ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
} }
ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart);
ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart);
if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) {
ctxPtr->lowLimit = ctxPtr->dictLimit;
ctxPtr->dictStart = ctxPtr->prefixStart;
} } }

return KLZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}

@@ -1158,7 +1171,7 @@ int KLZ4_compress_HC_continue_destSize (KLZ4_streamHC_t* KLZ4_streamHCPtr, const
int KLZ4_saveDictHC (KLZ4_streamHC_t* KLZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
KLZ4HC_CCtx_internal* const streamPtr = &KLZ4_streamHCPtr->internal_donotuse;
int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart);
DEBUGLOG(5, "KLZ4_saveDictHC(%p, %p, %d)", KLZ4_streamHCPtr, safeBuffer, dictSize);
assert(prefixSize >= 0);
if (dictSize > 64 KB) dictSize = 64 KB;

@@ -1166,12 +1179,13 @@ int KLZ4_saveDictHC (KLZ4_streamHC_t* KLZ4_streamHCPtr, char* safeBuffer, int di
if (dictSize > prefixSize) dictSize = prefixSize;
if (safeBuffer == NULL) assert(dictSize == 0);
if (dictSize > 0)
memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
{   U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
KLZ4_memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
{   U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit;
streamPtr->end = (const BYTE*)safeBuffer + dictSize;
streamPtr->base = streamPtr->end - endIndex;
streamPtr->prefixStart = streamPtr->end - dictSize;
streamPtr->dictLimit = endIndex - (U32)dictSize;
streamPtr->lowLimit = endIndex - (U32)dictSize;
streamPtr->dictStart = streamPtr->prefixStart;
if (streamPtr->nextToUpdate < streamPtr->dictLimit)
streamPtr->nextToUpdate = streamPtr->dictLimit;
}
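KLZ4_saveDictHC rebases the stream state onto a caller-owned buffer so history survives input-buffer reuse. A hedged sketch of the usual double-buffer streaming pattern, again written against the upstream LZ4_* names (prefixed with K in this tree); read_chunk and emit_block are hypothetical helpers:

    #include "lz4.h"
    #include "lz4hc.h"

    #define CHUNK 4096

    /* Hypothetical helper: fills buf, returns bytes read, 0 at end of input. */
    extern int read_chunk(char* buf, int cap);
    /* Hypothetical helper: consumes one compressed block. */
    extern void emit_block(const char* data, int size);

    void compress_stream(void)
    {
        LZ4_streamHC_t stream;
        char inBuf[CHUNK];
        char dict[64 * 1024];                      /* LZ4 keeps at most 64 KB of history */
        char outBuf[LZ4_COMPRESSBOUND(CHUNK)];
        int n;

        LZ4_initStreamHC(&stream, sizeof(stream));
        LZ4_setCompressionLevel(&stream, LZ4HC_CLEVEL_DEFAULT);

        while ((n = read_chunk(inBuf, CHUNK)) > 0) {
            int const cSize = LZ4_compress_HC_continue(&stream, inBuf, outBuf, n, (int)sizeof(outBuf));
            if (cSize <= 0) break;                 /* error handling elided */
            emit_block(outBuf, cSize);
            /* Move the history out of inBuf before the buffer is overwritten. */
            LZ4_saveDictHC(&stream, dict, (int)sizeof(dict));
        }
    }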
@@ -1199,7 +1213,7 @@ int KLZ4_compressHC_limitedOutput_continue (KLZ4_streamHC_t* ctx, const char* sr


/* Deprecated streaming functions */
int KLZ4_sizeofStreamStateHC(void) { return KLZ4_STREAMHCSIZE; }
int KLZ4_sizeofStreamStateHC(void) { return sizeof(KLZ4_streamHC_t); }

/* state is presumed correctly sized, aka >= sizeof(KLZ4_streamHC_t)
 * @return : 0 on success, !=0 if error */

@@ -1211,6 +1225,7 @@ int KLZ4_resetStreamStateHC(void* state, char* inputBuffer)
return 0;
}

#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
void* KLZ4_createHC (const char* inputBuffer)
{
KLZ4_streamHC_t* const hc4 = KLZ4_createStreamHC();

@@ -1225,6 +1240,7 @@ int KLZ4_freeHC (void* KLZ4HC_Data)
FREEMEM(KLZ4HC_Data);
return 0;
}
#endif

int KLZ4_compressHC2_continue (void* KLZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
{

@@ -1238,11 +1254,11 @@ int KLZ4_compressHC2_limitedOutput_continue (void* KLZ4HC_Data, const char* src,

char* KLZ4_slideInputBufferHC(void* KLZ4HC_Data)
{
KLZ4_streamHC_t *ctx = (KLZ4_streamHC_t*)KLZ4HC_Data;
const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
KLZ4_streamHC_t* const ctx = (KLZ4_streamHC_t*)KLZ4HC_Data;
const BYTE* bufferStart = ctx->internal_donotuse.prefixStart - ctx->internal_donotuse.dictLimit + ctx->internal_donotuse.lowLimit;
KLZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
/* avoid const char * -> char * conversion warning :( */
return (char *)(uptrval)bufferStart;
return (char*)(uptrval)bufferStart;
}


@@ -1325,7 +1341,7 @@ static int KLZ4HC_compress_optimal ( KLZ4HC_CCtx_internal* ctx,
{
int retval = 0;
#define TRAILING_LITERALS 3
#ifdef KLZ4HC_HEAPMODE
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
KLZ4HC_optimal_t* const opt = (KLZ4HC_optimal_t*)ALLOC(sizeof(KLZ4HC_optimal_t) * (KLZ4_OPT_NUM + TRAILING_LITERALS));
#else
KLZ4HC_optimal_t opt[KLZ4_OPT_NUM + TRAILING_LITERALS];   /* ~64 KB, which is a bit large for stack... */

@@ -1343,7 +1359,7 @@ static int KLZ4HC_compress_optimal ( KLZ4HC_CCtx_internal* ctx,
const BYTE* ovref = NULL;

/* init */
#ifdef KLZ4HC_HEAPMODE
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
if (opt == NULL) goto _return_label;
#endif
DEBUGLOG(5, "KLZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);

@@ -1575,7 +1591,7 @@ _last_literals:
} else {
*op++ = (BYTE)(lastRunSize << ML_BITS);
}
memcpy(op, anchor, lastRunSize);
KLZ4_memcpy(op, anchor, lastRunSize);
op += lastRunSize;
}

@@ -1608,7 +1624,7 @@ if (limit == fillOutput) {
goto _last_literals;
}
_return_label:
#ifdef KLZ4HC_HEAPMODE
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
FREEMEM(opt);
#endif
return retval;

@@ -1,7 +1,7 @@
/*
KLZ4 HC - High Compression Mode of KLZ4
Header File
Copyright (C) 2011-2017, Yann Collet.
Copyright (C) 2011-2020, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

Redistribution and use in source and binary forms, with or without

@@ -198,14 +198,17 @@ KLZ4LIB_API int KLZ4_saveDictHC (KLZ4_streamHC_t* streamHCPtr, char* safeBuffer,
#define KLZ4HC_HASH_MASK (KLZ4HC_HASHTABLESIZE - 1)


/* Never ever use these definitions directly !
 * Declare or allocate an KLZ4_streamHC_t instead.
**/
typedef struct KLZ4HC_CCtx_internal KLZ4HC_CCtx_internal;
struct KLZ4HC_CCtx_internal
{
    KLZ4_u32 hashTable[KLZ4HC_HASHTABLESIZE];
    KLZ4_u16 chainTable[KLZ4HC_MAXD];
    const KLZ4_byte* end;          /* next block here to continue on current prefix */
    const KLZ4_byte* base;         /* All index relative to this position */
    const KLZ4_byte* dictBase;     /* alternate base for extDict */
    const KLZ4_byte* prefixStart;  /* Indexes relative to this position */
    const KLZ4_byte* dictStart;    /* alternate reference for extDict */
    KLZ4_u32 dictLimit;            /* below that point, need extDict */
    KLZ4_u32 lowLimit;             /* below that point, no more dict */
    KLZ4_u32 nextToUpdate;         /* index from which to continue dictionary update */

@@ -216,20 +219,15 @@ struct KLZ4HC_CCtx_internal
    const KLZ4HC_CCtx_internal* dictCtx;
};


/* Do not use these definitions directly !
 * Declare or allocate an KLZ4_streamHC_t instead.
 */
#define KLZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
#define KLZ4_STREAMHCSIZE_VOIDP (KLZ4_STREAMHCSIZE / sizeof(void*))
#define KLZ4_STREAMHC_MINSIZE 262200 /* static size, for inter-version compatibility */
union KLZ4_streamHC_u {
    void* table[KLZ4_STREAMHCSIZE_VOIDP];
    char minStateSize[KLZ4_STREAMHC_MINSIZE];
    KLZ4HC_CCtx_internal internal_donotuse;
};   /* previously typedef'd to KLZ4_streamHC_t */

/* KLZ4_streamHC_t :
 * This structure allows static allocation of KLZ4 HC streaming state.
 * This can be used to allocate statically, on state, or as part of a larger structure.
 * This can be used to allocate statically on stack, or as part of a larger structure.
 *
 * Such state **must** be initialized using KLZ4_initStreamHC() before first use.
 *

@@ -244,7 +242,7 @@ union KLZ4_streamHC_u {
 * Required before first use of a statically allocated KLZ4_streamHC_t.
 * Before v1.9.0 : use KLZ4_resetStreamHC() instead
 */
KLZ4LIB_API KLZ4_streamHC_t* KLZ4_initStreamHC (void* buffer, size_t size);
KLZ4LIB_API KLZ4_streamHC_t* KLZ4_initStreamHC(void* buffer, size_t size);


/*-************************************

@@ -272,9 +270,11 @@ KLZ4_DEPRECATED("use KLZ4_compress_HC_continue() instead") KLZ4LIB_API int KLZ4_
 * KLZ4_slideInputBufferHC() will truncate the history of the stream, rather
 * than preserve a window-sized chunk of history.
 */
#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
KLZ4_DEPRECATED("use KLZ4_createStreamHC() instead") KLZ4LIB_API void* KLZ4_createHC (const char* inputBuffer);
KLZ4_DEPRECATED("use KLZ4_saveDictHC() instead")     KLZ4LIB_API char* KLZ4_slideInputBufferHC (void* KLZ4HC_Data);
KLZ4_DEPRECATED("use KLZ4_freeStreamHC() instead")   KLZ4LIB_API int   KLZ4_freeHC (void* KLZ4HC_Data);
#endif
KLZ4_DEPRECATED("use KLZ4_saveDictHC() instead")     KLZ4LIB_API char* KLZ4_slideInputBufferHC (void* KLZ4HC_Data);
KLZ4_DEPRECATED("use KLZ4_compress_HC_continue() instead") KLZ4LIB_API int KLZ4_compressHC2_continue (void* KLZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
KLZ4_DEPRECATED("use KLZ4_compress_HC_continue() instead") KLZ4LIB_API int KLZ4_compressHC2_limitedOutput_continue (void* KLZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
KLZ4_DEPRECATED("use KLZ4_createStreamHC() instead") KLZ4LIB_API int KLZ4_sizeofStreamStateHC(void);

@@ -305,7 +305,7 @@ KLZ4LIB_API void KLZ4_resetStreamHC (KLZ4_streamHC_t* streamHCPtr, int compressi
 * They should not be linked from DLL,
 * as there is no guarantee of API stability yet.
 * Prototypes will be promoted to "stable" status
 * after successfull usage in real-life scenarios.
 * after successful usage in real-life scenarios.
 ***************************************************/
#ifdef KLZ4_HC_STATIC_LINKING_ONLY   /* protection macro */
#ifndef KLZ4_HC_SLO_098092834
@@ -0,0 +1,917 @@
/* Common parts of the nanopb library. Most of these are quite low-level
 * stuff. For the high-level interface, see pb_encode.h and pb_decode.h.
 */

#ifndef PB_H_INCLUDED
#define PB_H_INCLUDED

/*****************************************************************
 * Nanopb compilation time options. You can change these here by *
 * uncommenting the lines, or on the compiler command line.      *
 *****************************************************************/

/* Enable support for dynamically allocated fields */
/* #define PB_ENABLE_MALLOC 1 */

/* Define this if your CPU / compiler combination does not support
 * unaligned memory access to packed structures. Note that packed
 * structures are only used when requested in .proto options. */
/* #define PB_NO_PACKED_STRUCTS 1 */

/* Increase the number of required fields that are tracked.
 * A compiler warning will tell if you need this. */
/* #define PB_MAX_REQUIRED_FIELDS 256 */

/* Add support for tag numbers > 65536 and fields larger than 65536 bytes. */
/* #define PB_FIELD_32BIT 1 */

/* Disable support for error messages in order to save some code space. */
/* #define PB_NO_ERRMSG 1 */

/* Disable support for custom streams (support only memory buffers). */
/* #define PB_BUFFER_ONLY 1 */

/* Disable support for 64-bit datatypes, for compilers without int64_t
   or to save some code space. */
/* #define PB_WITHOUT_64BIT 1 */

/* Don't encode scalar arrays as packed. This is only to be used when
 * the decoder on the receiving side cannot process packed scalar arrays.
 * Such example is older protobuf.js. */
/* #define PB_ENCODE_ARRAYS_UNPACKED 1 */

/* Enable conversion of doubles to floats for platforms that do not
 * support 64-bit doubles. Most commonly AVR. */
/* #define PB_CONVERT_DOUBLE_FLOAT 1 */

/* Check whether incoming strings are valid UTF-8 sequences. Slows down
 * the string processing slightly and slightly increases code size. */
/* #define PB_VALIDATE_UTF8 1 */

/* This can be defined if the platform is little-endian and has 8-bit bytes.
 * Normally it is automatically detected based on __BYTE_ORDER__ macro. */
/* #define PB_LITTLE_ENDIAN_8BIT 1 */

/* Configure static assert mechanism. Instead of changing these, set your
 * compiler to C11 standard mode if possible. */
/* #define PB_C99_STATIC_ASSERT 1 */
/* #define PB_NO_STATIC_ASSERT 1 */
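All of the options above are ordinary preprocessor defines, so in a Bazel-driven vendoring like this one they would typically be set per-target rather than by editing this file; an illustrative compile line only, not flags this tree necessarily passes:

    cc -DPB_FIELD_32BIT=1 -DPB_VALIDATE_UTF8=1 -c pb_common.c pb_encode.c pb_decode.c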

/******************************************************************
 * You usually don't need to change anything below this line.    *
 * Feel free to look around and use the defined macros, though.  *
 ******************************************************************/


/* Version of the nanopb library. Just in case you want to check it in
 * your own program. */
#define NANOPB_VERSION "nanopb-0.4.8-dev"

/* Include all the system headers needed by nanopb. You will need the
 * definitions of the following:
 * - strlen, memcpy, memset functions
 * - [u]int_least8_t, uint_fast8_t, [u]int_least16_t, [u]int32_t, [u]int64_t
 * - size_t
 * - bool
 *
 * If you don't have the standard header files, you can instead provide
 * a custom header that defines or includes all this. In that case,
 * define PB_SYSTEM_HEADER to the path of this file.
 */
#ifdef PB_SYSTEM_HEADER
#include PB_SYSTEM_HEADER
#else
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>

#ifdef PB_ENABLE_MALLOC
#include <stdlib.h>
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* Macro for defining packed structures (compiler dependent).
 * This just reduces memory requirements, but is not required.
 */
#if defined(PB_NO_PACKED_STRUCTS)
    /* Disable struct packing */
#   define PB_PACKED_STRUCT_START
#   define PB_PACKED_STRUCT_END
#   define pb_packed
#elif defined(__GNUC__) || defined(__clang__)
    /* For GCC and clang */
#   define PB_PACKED_STRUCT_START
#   define PB_PACKED_STRUCT_END
#   define pb_packed __attribute__((packed))
#elif defined(__ICCARM__) || defined(__CC_ARM)
    /* For IAR ARM and Keil MDK-ARM compilers */
#   define PB_PACKED_STRUCT_START _Pragma("pack(push, 1)")
#   define PB_PACKED_STRUCT_END _Pragma("pack(pop)")
#   define pb_packed
#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
    /* For Microsoft Visual C++ */
#   define PB_PACKED_STRUCT_START __pragma(pack(push, 1))
#   define PB_PACKED_STRUCT_END __pragma(pack(pop))
#   define pb_packed
#else
    /* Unknown compiler */
#   define PB_PACKED_STRUCT_START
#   define PB_PACKED_STRUCT_END
#   define pb_packed
#endif

/* Detect endianness */
#ifndef PB_LITTLE_ENDIAN_8BIT
#if ((defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \
     (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
     defined(__LITTLE_ENDIAN__) || defined(__ARMEL__) || \
     defined(__THUMBEL__) || defined(__AARCH64EL__) || defined(_MIPSEL) || \
     defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM)) \
     && CHAR_BIT == 8
#define PB_LITTLE_ENDIAN_8BIT 1
#endif
#endif

/* Handly macro for suppressing unreferenced-parameter compiler warnings. */
#ifndef PB_UNUSED
#define PB_UNUSED(x) (void)(x)
#endif

/* Harvard-architecture processors may need special attributes for storing
 * field information in program memory. */
#ifndef PB_PROGMEM
#ifdef __AVR__
#include <avr/pgmspace.h>
#define PB_PROGMEM             PROGMEM
#define PB_PROGMEM_READU32(x)  pgm_read_dword(&x)
#else
#define PB_PROGMEM
#define PB_PROGMEM_READU32(x)  (x)
#endif
#endif

/* Compile-time assertion, used for checking compatible compilation options.
 * If this does not work properly on your compiler, use
 * #define PB_NO_STATIC_ASSERT to disable it.
 *
 * But before doing that, check carefully the error message / place where it
 * comes from to see if the error has a real cause. Unfortunately the error
 * message is not always very clear to read, but you can see the reason better
 * in the place where the PB_STATIC_ASSERT macro was called.
 */
#ifndef PB_NO_STATIC_ASSERT
#  ifndef PB_STATIC_ASSERT
#    if defined(__ICCARM__)
       /* IAR has static_assert keyword but no _Static_assert */
#      define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
#    elif defined(_MSC_VER) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112)
       /* MSVC in C89 mode supports static_assert() keyword anyway */
#      define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
#    elif defined(PB_C99_STATIC_ASSERT)
       /* Classic negative-size-array static assert mechanism */
#      define PB_STATIC_ASSERT(COND,MSG) typedef char PB_STATIC_ASSERT_MSG(MSG, __LINE__, __COUNTER__)[(COND)?1:-1];
#      define PB_STATIC_ASSERT_MSG(MSG, LINE, COUNTER) PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER)
#      define PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER) pb_static_assertion_##MSG##_##LINE##_##COUNTER
#    elif defined(__cplusplus)
       /* C++11 standard static_assert mechanism */
#      define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
#    else
       /* C11 standard _Static_assert mechanism */
#      define PB_STATIC_ASSERT(COND,MSG) _Static_assert(COND,#MSG);
#    endif
#  endif
#else
   /* Static asserts disabled by PB_NO_STATIC_ASSERT */
#  define PB_STATIC_ASSERT(COND,MSG)
#endif

/* Test that PB_STATIC_ASSERT works
 * If you get errors here, you may need to do one of these:
 * - Enable C11 standard support in your compiler
 * - Define PB_C99_STATIC_ASSERT to enable C99 standard support
 * - Define PB_NO_STATIC_ASSERT to disable static asserts altogether
 */
PB_STATIC_ASSERT(1, STATIC_ASSERT_IS_NOT_WORKING)
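The same macro is available to application code for configuration checks; a minimal sketch (struct and tag name are arbitrary examples, not from this file):

    /* Compilation fails with a symbol naming DEMO_STRUCT_TOO_LARGE if the condition is false. */
    typedef struct { char payload[32]; } demo_struct_t;
    PB_STATIC_ASSERT(sizeof(demo_struct_t) <= 64, DEMO_STRUCT_TOO_LARGE)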

/* Number of required fields to keep track of. */
#ifndef PB_MAX_REQUIRED_FIELDS
#define PB_MAX_REQUIRED_FIELDS 64
#endif

#if PB_MAX_REQUIRED_FIELDS < 64
#error You should not lower PB_MAX_REQUIRED_FIELDS from the default value (64).
#endif

#ifdef PB_WITHOUT_64BIT
#ifdef PB_CONVERT_DOUBLE_FLOAT
/* Cannot use doubles without 64-bit types */
#undef PB_CONVERT_DOUBLE_FLOAT
#endif
#endif

/* List of possible field types. These are used in the autogenerated code.
 * Least-significant 4 bits tell the scalar type
 * Most-significant 4 bits specify repeated/required/packed etc.
 */

typedef uint_least8_t pb_type_t;

/**** Field data types ****/

/* Numeric types */
#define PB_LTYPE_BOOL    0x00U /* bool */
#define PB_LTYPE_VARINT  0x01U /* int32, int64, enum, bool */
#define PB_LTYPE_UVARINT 0x02U /* uint32, uint64 */
#define PB_LTYPE_SVARINT 0x03U /* sint32, sint64 */
#define PB_LTYPE_FIXED32 0x04U /* fixed32, sfixed32, float */
#define PB_LTYPE_FIXED64 0x05U /* fixed64, sfixed64, double */

/* Marker for last packable field type. */
#define PB_LTYPE_LAST_PACKABLE 0x05U

/* Byte array with pre-allocated buffer.
 * data_size is the length of the allocated PB_BYTES_ARRAY structure. */
#define PB_LTYPE_BYTES 0x06U

/* String with pre-allocated buffer.
 * data_size is the maximum length. */
#define PB_LTYPE_STRING 0x07U

/* Submessage
 * submsg_fields is pointer to field descriptions */
#define PB_LTYPE_SUBMESSAGE 0x08U

/* Submessage with pre-decoding callback
 * The pre-decoding callback is stored as pb_callback_t right before pSize.
 * submsg_fields is pointer to field descriptions */
#define PB_LTYPE_SUBMSG_W_CB 0x09U

/* Extension pseudo-field
 * The field contains a pointer to pb_extension_t */
#define PB_LTYPE_EXTENSION 0x0AU

/* Byte array with inline, pre-allocated byffer.
 * data_size is the length of the inline, allocated buffer.
 * This differs from PB_LTYPE_BYTES by defining the element as
 * pb_byte_t[data_size] rather than pb_bytes_array_t. */
#define PB_LTYPE_FIXED_LENGTH_BYTES 0x0BU

/* Number of declared LTYPES */
#define PB_LTYPES_COUNT 0x0CU
#define PB_LTYPE_MASK 0x0FU

/**** Field repetition rules ****/

#define PB_HTYPE_REQUIRED 0x00U
#define PB_HTYPE_OPTIONAL 0x10U
#define PB_HTYPE_SINGULAR 0x10U
#define PB_HTYPE_REPEATED 0x20U
#define PB_HTYPE_FIXARRAY 0x20U
#define PB_HTYPE_ONEOF    0x30U
#define PB_HTYPE_MASK     0x30U

/**** Field allocation types ****/

#define PB_ATYPE_STATIC   0x00U
#define PB_ATYPE_POINTER  0x80U
#define PB_ATYPE_CALLBACK 0x40U
#define PB_ATYPE_MASK     0xC0U

#define PB_ATYPE(x) ((x) & PB_ATYPE_MASK)
#define PB_HTYPE(x) ((x) & PB_HTYPE_MASK)
#define PB_LTYPE(x) ((x) & PB_LTYPE_MASK)
#define PB_LTYPE_IS_SUBMSG(x) (PB_LTYPE(x) == PB_LTYPE_SUBMESSAGE || \
                               PB_LTYPE(x) == PB_LTYPE_SUBMSG_W_CB)

/* Data type used for storing sizes of struct fields
 * and array counts.
 */
#if defined(PB_FIELD_32BIT)
    typedef uint32_t pb_size_t;
    typedef int32_t pb_ssize_t;
#else
    typedef uint_least16_t pb_size_t;
    typedef int_least16_t pb_ssize_t;
#endif
#define PB_SIZE_MAX ((pb_size_t)-1)

/* Data type for storing encoded data and other byte streams.
 * This typedef exists to support platforms where uint8_t does not exist.
 * You can regard it as equivalent on uint8_t on other platforms.
 */
typedef uint_least8_t pb_byte_t;

/* Forward declaration of struct types */
typedef struct pb_istream_s pb_istream_t;
typedef struct pb_ostream_s pb_ostream_t;
typedef struct pb_field_iter_s pb_field_iter_t;

/* This structure is used in auto-generated constants
 * to specify struct fields.
 */
typedef struct pb_msgdesc_s pb_msgdesc_t;
struct pb_msgdesc_s {
    const uint32_t *field_info;
    const pb_msgdesc_t * const * submsg_info;
    const pb_byte_t *default_value;

    bool (*field_callback)(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_iter_t *field);

    pb_size_t field_count;
    pb_size_t required_field_count;
    pb_size_t largest_tag;
};

/* Iterator for message descriptor */
struct pb_field_iter_s {
    const pb_msgdesc_t *descriptor;    /* Pointer to message descriptor constant */
    void *message;                     /* Pointer to start of the structure */

    pb_size_t index;                   /* Index of the field */
    pb_size_t field_info_index;        /* Index to descriptor->field_info array */
    pb_size_t required_field_index;    /* Index that counts only the required fields */
    pb_size_t submessage_index;        /* Index that counts only submessages */

    pb_size_t tag;                     /* Tag of current field */
    pb_size_t data_size;               /* sizeof() of a single item */
    pb_size_t array_size;              /* Number of array entries */
    pb_type_t type;                    /* Type of current field */

    void *pField;                      /* Pointer to current field in struct */
    void *pData;                       /* Pointer to current data contents. Different than pField for arrays and pointers. */
    void *pSize;                       /* Pointer to count/has field */

    const pb_msgdesc_t *submsg_desc;   /* For submessage fields, pointer to field descriptor for the submessage. */
};

/* For compatibility with legacy code */
typedef pb_field_iter_t pb_field_t;

/* Make sure that the standard integer types are of the expected sizes.
 * Otherwise fixed32/fixed64 fields can break.
 *
 * If you get errors here, it probably means that your stdint.h is not
 * correct for your platform.
 */
#ifndef PB_WITHOUT_64BIT
PB_STATIC_ASSERT(sizeof(int64_t) == 2 * sizeof(int32_t), INT64_T_WRONG_SIZE)
PB_STATIC_ASSERT(sizeof(uint64_t) == 2 * sizeof(uint32_t), UINT64_T_WRONG_SIZE)
#endif

/* This structure is used for 'bytes' arrays.
 * It has the number of bytes in the beginning, and after that an array.
 * Note that actual structs used will have a different length of bytes array.
 */
#define PB_BYTES_ARRAY_T(n) struct { pb_size_t size; pb_byte_t bytes[n]; }
#define PB_BYTES_ARRAY_T_ALLOCSIZE(n) ((size_t)n + offsetof(pb_bytes_array_t, bytes))

struct pb_bytes_array_s {
    pb_size_t size;
    pb_byte_t bytes[1];
};
typedef struct pb_bytes_array_s pb_bytes_array_t;
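The generator emits concrete instances of this shape for every statically allocated bytes field; a hand-written sketch of the same idiom (names are illustrative, not generator output):

    #include <string.h>

    typedef PB_BYTES_ARRAY_T(16) demo_payload_t;   /* { pb_size_t size; pb_byte_t bytes[16]; } */

    static void fill_payload(demo_payload_t *p)
    {
        static const pb_byte_t raw[] = {0xDE, 0xAD, 0xBE, 0xEF};
        p->size = (pb_size_t)sizeof(raw);          /* count of valid bytes in 'bytes' */
        memcpy(p->bytes, raw, sizeof(raw));
    }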

/* This structure is used for giving the callback function.
 * It is stored in the message structure and filled in by the method that
 * calls pb_decode.
 *
 * The decoding callback will be given a limited-length stream
 * If the wire type was string, the length is the length of the string.
 * If the wire type was a varint/fixed32/fixed64, the length is the length
 * of the actual value.
 * The function may be called multiple times (especially for repeated types,
 * but also otherwise if the message happens to contain the field multiple
 * times.)
 *
 * The encoding callback will receive the actual output stream.
 * It should write all the data in one call, including the field tag and
 * wire type. It can write multiple fields.
 *
 * The callback can be null if you want to skip a field.
 */
typedef struct pb_callback_s pb_callback_t;
struct pb_callback_s {
    /* Callback functions receive a pointer to the arg field.
     * You can access the value of the field as *arg, and modify it if needed.
     */
    union {
        bool (*decode)(pb_istream_t *stream, const pb_field_t *field, void **arg);
        bool (*encode)(pb_ostream_t *stream, const pb_field_t *field, void * const *arg);
    } funcs;

    /* Free arg for use by callback */
    void *arg;
};

extern bool pb_default_field_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field);
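A hedged example of a decode callback wired through pb_callback_t; pb_read() and the bytes_left member come from pb_decode.h / pb.h, while the message and field names stand in for hypothetical generator output:

    #include <pb_decode.h>
    #include <stdio.h>

    /* Called once per occurrence of the field; the stream is limited to this field's bytes. */
    static bool print_name_cb(pb_istream_t *stream, const pb_field_t *field, void **arg)
    {
        pb_byte_t buf[64] = {0};
        (void)field; (void)arg;
        if (stream->bytes_left > sizeof(buf) - 1)
            return false;                          /* value longer than we accept */
        if (!pb_read(stream, buf, stream->bytes_left))
            return false;
        printf("name = %s\n", buf);
        return true;
    }

    /* Hypothetical wiring, assuming a generated MyMessage with a callback 'name' field:
     *     MyMessage msg = MyMessage_init_zero;
     *     msg.name.funcs.decode = &print_name_cb;
     *     pb_decode(&istream, MyMessage_fields, &msg);
     */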

/* Wire types. Library user needs these only in encoder callbacks. */
typedef enum {
    PB_WT_VARINT = 0,
    PB_WT_64BIT  = 1,
    PB_WT_STRING = 2,
    PB_WT_32BIT  = 5,
    PB_WT_PACKED = 255 /* PB_WT_PACKED is internal marker for packed arrays. */
} pb_wire_type_t;

/* Structure for defining the handling of unknown/extension fields.
 * Usually the pb_extension_type_t structure is automatically generated,
 * while the pb_extension_t structure is created by the user. However,
 * if you want to catch all unknown fields, you can also create a custom
 * pb_extension_type_t with your own callback.
 */
typedef struct pb_extension_type_s pb_extension_type_t;
typedef struct pb_extension_s pb_extension_t;
struct pb_extension_type_s {
    /* Called for each unknown field in the message.
     * If you handle the field, read off all of its data and return true.
     * If you do not handle the field, do not read anything and return true.
     * If you run into an error, return false.
     * Set to NULL for default handler.
     */
    bool (*decode)(pb_istream_t *stream, pb_extension_t *extension,
                   uint32_t tag, pb_wire_type_t wire_type);

    /* Called once after all regular fields have been encoded.
     * If you have something to write, do so and return true.
     * If you do not have anything to write, just return true.
     * If you run into an error, return false.
     * Set to NULL for default handler.
     */
    bool (*encode)(pb_ostream_t *stream, const pb_extension_t *extension);

    /* Free field for use by the callback. */
    const void *arg;
};

struct pb_extension_s {
    /* Type describing the extension field. Usually you'll initialize
     * this to a pointer to the automatically generated structure. */
    const pb_extension_type_t *type;

    /* Destination for the decoded data. This must match the datatype
     * of the extension field. */
    void *dest;

    /* Pointer to the next extension handler, or NULL.
     * If this extension does not match a field, the next handler is
     * automatically called. */
    pb_extension_t *next;

    /* The decoder sets this to true if the extension was found.
     * Ignored for encoding. */
    bool found;
};

#define pb_extension_init_zero {NULL,NULL,NULL,false}

/* Memory allocation functions to use. You can define pb_realloc and
 * pb_free to custom functions if you want. */
#ifdef PB_ENABLE_MALLOC
#   ifndef pb_realloc
#       define pb_realloc(ptr, size) realloc(ptr, size)
#   endif
#   ifndef pb_free
#       define pb_free(ptr) free(ptr)
#   endif
#endif

/* This is used to inform about need to regenerate .pb.h/.pb.c files. */
#define PB_PROTO_HEADER_VERSION 40

/* These macros are used to declare pb_field_t's in the constant array. */
/* Size of a structure member, in bytes. */
#define pb_membersize(st, m) (sizeof ((st*)0)->m)
/* Number of entries in an array. */
#define pb_arraysize(st, m) (pb_membersize(st, m) / pb_membersize(st, m[0]))
/* Delta from start of one member to the start of another member. */
#define pb_delta(st, m1, m2) ((int)offsetof(st, m1) - (int)offsetof(st, m2))
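For a concrete struct these evaluate as follows (a worked example; exact offsets depend on the ABI's padding, and the struct is illustrative):

    typedef struct {
        bool      has_id;
        uint32_t  id;
        pb_size_t values_count;
        int32_t   values[8];
    } demo_t;

    /* pb_membersize(demo_t, values)  -> sizeof(int32_t[8]), i.e. 32 on common ABIs
     * pb_arraysize(demo_t, values)   -> 32 / 4 == 8 entries
     * pb_delta(demo_t, id, has_id)   -> positive distance in bytes from has_id to id */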

/* Force expansion of macro value */
#define PB_EXPAND(x) x

/* Binding of a message field set into a specific structure */
#define PB_BIND(msgname, structname, width) \
    const uint32_t structname ## _field_info[] PB_PROGMEM = \
    { \
        msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ ## width, structname) \
        0 \
    }; \
    const pb_msgdesc_t* const structname ## _submsg_info[] = \
    { \
        msgname ## _FIELDLIST(PB_GEN_SUBMSG_INFO, structname) \
        NULL \
    }; \
    const pb_msgdesc_t structname ## _msg = \
    { \
       structname ## _field_info, \
       structname ## _submsg_info, \
       msgname ## _DEFAULT, \
       msgname ## _CALLBACK, \
       0 msgname ## _FIELDLIST(PB_GEN_FIELD_COUNT, structname), \
       0 msgname ## _FIELDLIST(PB_GEN_REQ_FIELD_COUNT, structname), \
       0 msgname ## _FIELDLIST(PB_GEN_LARGEST_TAG, structname), \
    }; \
    msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ASSERT_ ## width, structname)

#define PB_GEN_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) +1
#define PB_GEN_REQ_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) \
    + (PB_HTYPE_ ## htype == PB_HTYPE_REQUIRED)
#define PB_GEN_LARGEST_TAG(structname, atype, htype, ltype, fieldname, tag) \
    * 0 + tag
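In generated .pb.c files this machinery is driven by a single PB_BIND() invocation; a sketch for a hypothetical two-field message (the FIELDLIST X-macro mirrors what the generator emits into the .pb.h):

    /* In demo.pb.h (generated):
     *     #define DemoMessage_FIELDLIST(X, a) \
     *         X(a, STATIC, SINGULAR, INT32, id, 1) \
     *         X(a, STATIC, SINGULAR, BOOL,  ok, 2)
     *
     * In demo.pb.c (generated):
     *     PB_BIND(DemoMessage, DemoMessage, AUTO)
     *
     * The PB_BIND line expands into DemoMessage_field_info[],
     * DemoMessage_submsg_info[] and the DemoMessage_msg descriptor
     * described above, plus the width assertions. */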

/* X-macro for generating the entries in struct_field_info[] array. */
#define PB_GEN_FIELD_INFO_1(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_2(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_4(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_8(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_AUTO(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \
                   tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_FIELDINFO_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \
    PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size)

#define PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \
    PB_FIELDINFO_ ## width(tag, type, data_offset, data_size, size_offset, array_size)

/* X-macro for generating asserts that entries fit in struct_field_info[] array.
 * The structure of macros here must match the structure above in PB_GEN_FIELD_INFO_x(),
 * but it is not easily reused because of how macro substitutions work. */
#define PB_GEN_FIELD_INFO_ASSERT_1(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_ASSERT_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_ASSERT_2(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_ASSERT_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_ASSERT_4(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_ASSERT_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_ASSERT_8(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_ASSERT_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_ASSERT_AUTO(structname, atype, htype, ltype, fieldname, tag) \
    PB_FIELDINFO_ASSERT_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \
                   tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
                   PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
                   PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_FIELDINFO_ASSERT_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \
    PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size)

#define PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \
    PB_FIELDINFO_ASSERT_ ## width(tag, type, data_offset, data_size, size_offset, array_size)

#define PB_DATA_OFFSET_STATIC(htype, structname, fieldname) PB_DO ## htype(structname, fieldname)
#define PB_DATA_OFFSET_POINTER(htype, structname, fieldname) PB_DO ## htype(structname, fieldname)
#define PB_DATA_OFFSET_CALLBACK(htype, structname, fieldname) PB_DO ## htype(structname, fieldname)
#define PB_DO_PB_HTYPE_REQUIRED(structname, fieldname) offsetof(structname, fieldname)
#define PB_DO_PB_HTYPE_SINGULAR(structname, fieldname) offsetof(structname, fieldname)
#define PB_DO_PB_HTYPE_ONEOF(structname, fieldname) offsetof(structname, PB_ONEOF_NAME(FULL, fieldname))
#define PB_DO_PB_HTYPE_OPTIONAL(structname, fieldname) offsetof(structname, fieldname)
#define PB_DO_PB_HTYPE_REPEATED(structname, fieldname) offsetof(structname, fieldname)
#define PB_DO_PB_HTYPE_FIXARRAY(structname, fieldname) offsetof(structname, fieldname)

#define PB_SIZE_OFFSET_STATIC(htype, structname, fieldname) PB_SO ## htype(structname, fieldname)
#define PB_SIZE_OFFSET_POINTER(htype, structname, fieldname) PB_SO_PTR ## htype(structname, fieldname)
#define PB_SIZE_OFFSET_CALLBACK(htype, structname, fieldname) PB_SO_CB ## htype(structname, fieldname)
#define PB_SO_PB_HTYPE_REQUIRED(structname, fieldname) 0
#define PB_SO_PB_HTYPE_SINGULAR(structname, fieldname) 0
#define PB_SO_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF2(structname, PB_ONEOF_NAME(FULL, fieldname), PB_ONEOF_NAME(UNION, fieldname))
#define PB_SO_PB_HTYPE_ONEOF2(structname, fullname, unionname) PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname)
#define PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname) pb_delta(structname, fullname, which_ ## unionname)
#define PB_SO_PB_HTYPE_OPTIONAL(structname, fieldname) pb_delta(structname, fieldname, has_ ## fieldname)
#define PB_SO_PB_HTYPE_REPEATED(structname, fieldname) pb_delta(structname, fieldname, fieldname ## _count)
#define PB_SO_PB_HTYPE_FIXARRAY(structname, fieldname) 0
#define PB_SO_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 0
#define PB_SO_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 0
#define PB_SO_PTR_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname)
#define PB_SO_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 0
#define PB_SO_PTR_PB_HTYPE_REPEATED(structname, fieldname) PB_SO_PB_HTYPE_REPEATED(structname, fieldname)
#define PB_SO_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_REQUIRED(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_SINGULAR(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname)
#define PB_SO_CB_PB_HTYPE_OPTIONAL(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_REPEATED(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_FIXARRAY(structname, fieldname) 0

#define PB_ARRAY_SIZE_STATIC(htype, structname, fieldname) PB_AS ## htype(structname, fieldname)
#define PB_ARRAY_SIZE_POINTER(htype, structname, fieldname) PB_AS_PTR ## htype(structname, fieldname)
#define PB_ARRAY_SIZE_CALLBACK(htype, structname, fieldname) 1
#define PB_AS_PB_HTYPE_REQUIRED(structname, fieldname) 1
#define PB_AS_PB_HTYPE_SINGULAR(structname, fieldname) 1
#define PB_AS_PB_HTYPE_OPTIONAL(structname, fieldname) 1
#define PB_AS_PB_HTYPE_ONEOF(structname, fieldname) 1
#define PB_AS_PB_HTYPE_REPEATED(structname, fieldname) pb_arraysize(structname, fieldname)
#define PB_AS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname)
#define PB_AS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_ONEOF(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_REPEATED(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname[0])

#define PB_DATA_SIZE_STATIC(htype, structname, fieldname) PB_DS ## htype(structname, fieldname)
#define PB_DATA_SIZE_POINTER(htype, structname, fieldname) PB_DS_PTR ## htype(structname, fieldname)
#define PB_DATA_SIZE_CALLBACK(htype, structname, fieldname) PB_DS_CB ## htype(structname, fieldname)
#define PB_DS_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname))
#define PB_DS_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname)[0])
#define PB_DS_PTR_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0][0])
#define PB_DS_CB_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_CB_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_CB_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_CB_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname))
#define PB_DS_CB_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_CB_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname)

#define PB_ONEOF_NAME(type, tuple) PB_EXPAND(PB_ONEOF_NAME_ ## type tuple)
#define PB_ONEOF_NAME_UNION(unionname,membername,fullname) unionname
#define PB_ONEOF_NAME_MEMBER(unionname,membername,fullname) membername
#define PB_ONEOF_NAME_FULL(unionname,membername,fullname) fullname

#define PB_GEN_SUBMSG_INFO(structname, atype, htype, ltype, fieldname, tag) \
    PB_SUBMSG_INFO_ ## htype(_PB_LTYPE_ ## ltype, structname, fieldname)

#define PB_SUBMSG_INFO_REQUIRED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
#define PB_SUBMSG_INFO_SINGULAR(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
#define PB_SUBMSG_INFO_OPTIONAL(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
#define PB_SUBMSG_INFO_ONEOF(ltype, structname, fieldname) PB_SUBMSG_INFO_ONEOF2(ltype, structname, PB_ONEOF_NAME(UNION, fieldname), PB_ONEOF_NAME(MEMBER, fieldname))
#define PB_SUBMSG_INFO_ONEOF2(ltype, structname, unionname, membername) PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername)
#define PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername) PB_SI ## ltype(structname ## _ ## unionname ## _ ## membername ## _MSGTYPE)
#define PB_SUBMSG_INFO_REPEATED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
#define PB_SUBMSG_INFO_FIXARRAY(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
#define PB_SI_PB_LTYPE_BOOL(t)
#define PB_SI_PB_LTYPE_BYTES(t)
#define PB_SI_PB_LTYPE_DOUBLE(t)
#define PB_SI_PB_LTYPE_ENUM(t)
#define PB_SI_PB_LTYPE_UENUM(t)
#define PB_SI_PB_LTYPE_FIXED32(t)
#define PB_SI_PB_LTYPE_FIXED64(t)
#define PB_SI_PB_LTYPE_FLOAT(t)
#define PB_SI_PB_LTYPE_INT32(t)
#define PB_SI_PB_LTYPE_INT64(t)
#define PB_SI_PB_LTYPE_MESSAGE(t)  PB_SUBMSG_DESCRIPTOR(t)
#define PB_SI_PB_LTYPE_MSG_W_CB(t) PB_SUBMSG_DESCRIPTOR(t)
#define PB_SI_PB_LTYPE_SFIXED32(t)
#define PB_SI_PB_LTYPE_SFIXED64(t)
#define PB_SI_PB_LTYPE_SINT32(t)
#define PB_SI_PB_LTYPE_SINT64(t)
#define PB_SI_PB_LTYPE_STRING(t)
#define PB_SI_PB_LTYPE_UINT32(t)
#define PB_SI_PB_LTYPE_UINT64(t)
#define PB_SI_PB_LTYPE_EXTENSION(t)
#define PB_SI_PB_LTYPE_FIXED_LENGTH_BYTES(t)
#define PB_SUBMSG_DESCRIPTOR(t)    &(t ## _msg),

/* The field descriptors use a variable width format, with width of either
 * 1, 2, 4 or 8 of 32-bit words. The two lowest bytes of the first byte always
 * encode the descriptor size, 6 lowest bits of field tag number, and 8 bits
 * of the field type.
 *
 * Descriptor size is encoded as 0 = 1 word, 1 = 2 words, 2 = 4 words, 3 = 8 words.
 *
 * Formats, listed starting with the least significant bit of the first word.
 * 1 word:  [2-bit len] [6-bit tag] [8-bit type] [8-bit data_offset] [4-bit size_offset] [4-bit data_size]
 *
 * 2 words: [2-bit len] [6-bit tag] [8-bit type] [12-bit array_size] [4-bit size_offset]
 *          [16-bit data_offset] [12-bit data_size] [4-bit tag>>6]
 *
 * 4 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit array_size]
 *          [8-bit size_offset] [24-bit tag>>6]
 *          [32-bit data_offset]
 *          [32-bit data_size]
 *
 * 8 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit reserved]
 *          [8-bit size_offset] [24-bit tag>>6]
 *          [32-bit data_offset]
 *          [32-bit data_size]
 *          [32-bit array_size]
 *          [32-bit reserved]
 *          [32-bit reserved]
 *          [32-bit reserved]
 */
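Worked example of the 1-word layout (arithmetic added here, not from the source): for tag 1, a static singular varint field (type byte 0x11), data_offset 0, size_offset 0 and data_size 4, PB_FIELDINFO_1 below packs

    bits  0-1   len 0 (= 1 word)
    bits  2-7   tag & 0x3F    = 1
    bits  8-15  type          = 0x11
    bits 16-23  data_offset   = 0
    bits 24-27  size_offset   = 0
    bits 28-31  data_size     = 4

for a descriptor word of 0x40001104.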

#define PB_FIELDINFO_1(tag, type, data_offset, data_size, size_offset, array_size) \
    (0 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(data_offset) & 0xFF) << 16) | \
     (((uint32_t)(size_offset) & 0x0F) << 24) | (((uint32_t)(data_size) & 0x0F) << 28)),

#define PB_FIELDINFO_2(tag, type, data_offset, data_size, size_offset, array_size) \
    (1 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFF) << 16) | (((uint32_t)(size_offset) & 0x0F) << 28)), \
    (((uint32_t)(data_offset) & 0xFFFF) | (((uint32_t)(data_size) & 0xFFF) << 16) | (((uint32_t)(tag) & 0x3c0) << 22)),

#define PB_FIELDINFO_4(tag, type, data_offset, data_size, size_offset, array_size) \
    (2 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFFF) << 16)), \
    ((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
    (data_offset), (data_size),

#define PB_FIELDINFO_8(tag, type, data_offset, data_size, size_offset, array_size) \
    (3 | (((tag) << 2) & 0xFF) | ((type) << 8)), \
    ((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
    (data_offset), (data_size), (array_size), 0, 0, 0,

/* These assertions verify that the field information fits in the allocated space.
 * The generator tries to automatically determine the correct width that can fit all
 * data associated with a message. These asserts will fail only if there has been a
 * problem in the automatic logic - this may be worth reporting as a bug. As a workaround,
 * you can increase the descriptor width by defining PB_FIELDINFO_WIDTH or by setting the
 * descriptorsize option in the .options file.
 */
#define PB_FITS(value,bits) ((uint32_t)(value) < ((uint32_t)1<<bits))
#define PB_FIELDINFO_ASSERT_1(tag, type, data_offset, data_size, size_offset, array_size) \
    PB_STATIC_ASSERT(PB_FITS(tag,6) && PB_FITS(data_offset,8) && PB_FITS(size_offset,4) && PB_FITS(data_size,4) && PB_FITS(array_size,1), FIELDINFO_DOES_NOT_FIT_width1_field ## tag)

#define PB_FIELDINFO_ASSERT_2(tag, type, data_offset, data_size, size_offset, array_size) \
    PB_STATIC_ASSERT(PB_FITS(tag,10) && PB_FITS(data_offset,16) && PB_FITS(size_offset,4) && PB_FITS(data_size,12) && PB_FITS(array_size,12), FIELDINFO_DOES_NOT_FIT_width2_field ## tag)

#ifndef PB_FIELD_32BIT
/* Maximum field sizes are still 16-bit if pb_size_t is 16-bit */
#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \
    PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS((int_least8_t)size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag)

#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \
    PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS((int_least8_t)size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width8_field ## tag)
#else
/* Up to 32-bit fields supported.
 * Note that the checks are against 31 bits to avoid compiler warnings about shift wider than type in the test.
 * I expect that there is no reasonable use for >2GB messages with nanopb anyway.
 */
#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \
    PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag)

#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \
    PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,31), FIELDINFO_DOES_NOT_FIT_width8_field ## tag)
#endif


/* Automatic picking of FIELDINFO width:
 * Uses width 1 when possible, otherwise resorts to width 2.
 * This is used when PB_BIND() is called with "AUTO" as the argument.
 * The generator will give an explicit size argument when it knows that a message
 * structure grows beyond the 1-word format limits.
 */
#define PB_FIELDINFO_WIDTH_AUTO(atype, htype, ltype) PB_FI_WIDTH ## atype(htype, ltype)
#define PB_FI_WIDTH_PB_ATYPE_STATIC(htype, ltype) PB_FI_WIDTH ## htype(ltype)
#define PB_FI_WIDTH_PB_ATYPE_POINTER(htype, ltype) PB_FI_WIDTH ## htype(ltype)
#define PB_FI_WIDTH_PB_ATYPE_CALLBACK(htype, ltype) 2
#define PB_FI_WIDTH_PB_HTYPE_REQUIRED(ltype) PB_FI_WIDTH ## ltype
#define PB_FI_WIDTH_PB_HTYPE_SINGULAR(ltype) PB_FI_WIDTH ## ltype
#define PB_FI_WIDTH_PB_HTYPE_OPTIONAL(ltype) PB_FI_WIDTH ## ltype
#define PB_FI_WIDTH_PB_HTYPE_ONEOF(ltype) PB_FI_WIDTH ## ltype
#define PB_FI_WIDTH_PB_HTYPE_REPEATED(ltype) 2
#define PB_FI_WIDTH_PB_HTYPE_FIXARRAY(ltype) 2
#define PB_FI_WIDTH_PB_LTYPE_BOOL 1
#define PB_FI_WIDTH_PB_LTYPE_BYTES 2
#define PB_FI_WIDTH_PB_LTYPE_DOUBLE 1
#define PB_FI_WIDTH_PB_LTYPE_ENUM 1
#define PB_FI_WIDTH_PB_LTYPE_UENUM 1
#define PB_FI_WIDTH_PB_LTYPE_FIXED32 1
#define PB_FI_WIDTH_PB_LTYPE_FIXED64 1
#define PB_FI_WIDTH_PB_LTYPE_FLOAT 1
#define PB_FI_WIDTH_PB_LTYPE_INT32 1
#define PB_FI_WIDTH_PB_LTYPE_INT64 1
#define PB_FI_WIDTH_PB_LTYPE_MESSAGE 2
#define PB_FI_WIDTH_PB_LTYPE_MSG_W_CB 2
#define PB_FI_WIDTH_PB_LTYPE_SFIXED32 1
#define PB_FI_WIDTH_PB_LTYPE_SFIXED64 1
#define PB_FI_WIDTH_PB_LTYPE_SINT32 1
#define PB_FI_WIDTH_PB_LTYPE_SINT64 1
#define PB_FI_WIDTH_PB_LTYPE_STRING 2
#define PB_FI_WIDTH_PB_LTYPE_UINT32 1
#define PB_FI_WIDTH_PB_LTYPE_UINT64 1
#define PB_FI_WIDTH_PB_LTYPE_EXTENSION 1
#define PB_FI_WIDTH_PB_LTYPE_FIXED_LENGTH_BYTES 2

/* The mapping from protobuf types to LTYPEs is done using these macros. */
#define PB_LTYPE_MAP_BOOL PB_LTYPE_BOOL
#define PB_LTYPE_MAP_BYTES PB_LTYPE_BYTES
#define PB_LTYPE_MAP_DOUBLE PB_LTYPE_FIXED64
#define PB_LTYPE_MAP_ENUM PB_LTYPE_VARINT
#define PB_LTYPE_MAP_UENUM PB_LTYPE_UVARINT
#define PB_LTYPE_MAP_FIXED32 PB_LTYPE_FIXED32
#define PB_LTYPE_MAP_FIXED64 PB_LTYPE_FIXED64
#define PB_LTYPE_MAP_FLOAT PB_LTYPE_FIXED32
#define PB_LTYPE_MAP_INT32 PB_LTYPE_VARINT
#define PB_LTYPE_MAP_INT64 PB_LTYPE_VARINT
#define PB_LTYPE_MAP_MESSAGE PB_LTYPE_SUBMESSAGE
#define PB_LTYPE_MAP_MSG_W_CB PB_LTYPE_SUBMSG_W_CB
#define PB_LTYPE_MAP_SFIXED32 PB_LTYPE_FIXED32
#define PB_LTYPE_MAP_SFIXED64 PB_LTYPE_FIXED64
#define PB_LTYPE_MAP_SINT32 PB_LTYPE_SVARINT
#define PB_LTYPE_MAP_SINT64 PB_LTYPE_SVARINT
#define PB_LTYPE_MAP_STRING PB_LTYPE_STRING
#define PB_LTYPE_MAP_UINT32 PB_LTYPE_UVARINT
#define PB_LTYPE_MAP_UINT64 PB_LTYPE_UVARINT
#define PB_LTYPE_MAP_EXTENSION PB_LTYPE_EXTENSION
#define PB_LTYPE_MAP_FIXED_LENGTH_BYTES PB_LTYPE_FIXED_LENGTH_BYTES

/* These macros are used for giving out error messages.
 * They are mostly a debugging aid; the main error information
 * is the true/false return value from functions.
 * Some code space can be saved by disabling the error
 * messages if they are not used.
 *
 * PB_SET_ERROR() sets the error message if none has been set yet.
 *                msg must be a constant string literal.
 * PB_GET_ERROR() always returns a pointer to a string.
 * PB_RETURN_ERROR() sets the error and returns false from the current
 *                   function.
 */
#ifdef PB_NO_ERRMSG
#define PB_SET_ERROR(stream, msg) PB_UNUSED(stream)
#define PB_GET_ERROR(stream) "(errmsg disabled)"
#else
#define PB_SET_ERROR(stream, msg) (stream->errmsg = (stream)->errmsg ? (stream)->errmsg : (msg))
#define PB_GET_ERROR(stream) ((stream)->errmsg ? (stream)->errmsg : "(none)")
#endif

#define PB_RETURN_ERROR(stream, msg) return PB_SET_ERROR(stream, msg), false
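
/* A hedged sketch of how these macros are typically used in a field callback
 * (the callback and its size limit are hypothetical, not upstream code;
 * pb_read() is declared in pb_decode.h): */
static bool my_bounded_string_cb(pb_istream_t *stream, const pb_field_t *field, void **arg)
{
    char buf[64];
    size_t len = stream->bytes_left;
    (void)field; (void)arg;
    if (len >= sizeof(buf))
        PB_RETURN_ERROR(stream, "string too long"); /* sets errmsg, returns false */
    if (!pb_read(stream, (pb_byte_t*)buf, len))
        return false; /* pb_read has already set stream->errmsg */
    buf[len] = '\0';
    return true;
}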

#ifdef __cplusplus
} /* extern "C" */
#endif

#ifdef __cplusplus
#if __cplusplus >= 201103L
#define PB_CONSTEXPR constexpr
#else  // __cplusplus >= 201103L
#define PB_CONSTEXPR
#endif  // __cplusplus >= 201103L

#if __cplusplus >= 201703L
#define PB_INLINE_CONSTEXPR inline constexpr
#else  // __cplusplus >= 201703L
#define PB_INLINE_CONSTEXPR PB_CONSTEXPR
#endif  // __cplusplus >= 201703L

extern "C++"
{
namespace nanopb {
// Each type will be partially specialized by the generator.
template <typename GenMessageT> struct MessageDescriptor;
}  // namespace nanopb
}
#endif  /* __cplusplus */

#endif

@ -0,0 +1,388 @@
/* pb_common.c: Common support functions for pb_encode.c and pb_decode.c.
 *
 * 2014 Petteri Aimonen <jpa@kapsi.fi>
 */

#include "nanopb/pb_common.h"

static bool load_descriptor_values(pb_field_iter_t *iter)
{
    uint32_t word0;
    uint32_t data_offset;
    int_least8_t size_offset;

    if (iter->index >= iter->descriptor->field_count)
        return false;

    word0 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
    iter->type = (pb_type_t)((word0 >> 8) & 0xFF);

    switch(word0 & 3)
    {
        case 0: {
            /* 1-word format */
            iter->array_size = 1;
            iter->tag = (pb_size_t)((word0 >> 2) & 0x3F);
            size_offset = (int_least8_t)((word0 >> 24) & 0x0F);
            data_offset = (word0 >> 16) & 0xFF;
            iter->data_size = (pb_size_t)((word0 >> 28) & 0x0F);
            break;
        }

        case 1: {
            /* 2-word format */
            uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);

            iter->array_size = (pb_size_t)((word0 >> 16) & 0x0FFF);
            iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 28) << 6));
            size_offset = (int_least8_t)((word0 >> 28) & 0x0F);
            data_offset = word1 & 0xFFFF;
            iter->data_size = (pb_size_t)((word1 >> 16) & 0x0FFF);
            break;
        }

        case 2: {
            /* 4-word format */
            uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
            uint32_t word2 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]);
            uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]);

            iter->array_size = (pb_size_t)(word0 >> 16);
            iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6));
            size_offset = (int_least8_t)(word1 & 0xFF);
            data_offset = word2;
            iter->data_size = (pb_size_t)word3;
            break;
        }

        default: {
            /* 8-word format */
            uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
            uint32_t word2 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]);
            uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]);
            uint32_t word4 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 4]);

            iter->array_size = (pb_size_t)word4;
            iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6));
            size_offset = (int_least8_t)(word1 & 0xFF);
            data_offset = word2;
            iter->data_size = (pb_size_t)word3;
            break;
        }
    }

    if (!iter->message)
    {
        /* Avoid doing arithmetic on null pointers, it is undefined */
        iter->pField = NULL;
        iter->pSize = NULL;
    }
    else
    {
        iter->pField = (char*)iter->message + data_offset;

        if (size_offset)
        {
            iter->pSize = (char*)iter->pField - size_offset;
        }
        else if (PB_HTYPE(iter->type) == PB_HTYPE_REPEATED &&
                 (PB_ATYPE(iter->type) == PB_ATYPE_STATIC ||
                  PB_ATYPE(iter->type) == PB_ATYPE_POINTER))
        {
            /* Fixed count array */
            iter->pSize = &iter->array_size;
        }
        else
        {
            iter->pSize = NULL;
        }

        if (PB_ATYPE(iter->type) == PB_ATYPE_POINTER && iter->pField != NULL)
        {
            iter->pData = *(void**)iter->pField;
        }
        else
        {
            iter->pData = iter->pField;
        }
    }

    if (PB_LTYPE_IS_SUBMSG(iter->type))
    {
        iter->submsg_desc = iter->descriptor->submsg_info[iter->submessage_index];
    }
    else
    {
        iter->submsg_desc = NULL;
    }

    return true;
}

static void advance_iterator(pb_field_iter_t *iter)
{
    iter->index++;

    if (iter->index >= iter->descriptor->field_count)
    {
        /* Restart */
        iter->index = 0;
        iter->field_info_index = 0;
        iter->submessage_index = 0;
        iter->required_field_index = 0;
    }
    else
    {
        /* Increment indexes based on previous field type.
         * All field info formats have the following fields:
         * - lowest 2 bits tell the amount of words in the descriptor (2^n words)
         * - bits 2..7 give the lowest bits of the tag number.
         * - bits 8..15 give the field type.
         */
        uint32_t prev_descriptor = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
        pb_type_t prev_type = (prev_descriptor >> 8) & 0xFF;
        pb_size_t descriptor_len = (pb_size_t)(1 << (prev_descriptor & 3));

        /* Add to fields.
         * The cast to pb_size_t is needed to avoid a -Wconversion warning.
         * Because the data is constants from the generator, there is no danger of overflow.
         */
        iter->field_info_index = (pb_size_t)(iter->field_info_index + descriptor_len);
        iter->required_field_index = (pb_size_t)(iter->required_field_index + (PB_HTYPE(prev_type) == PB_HTYPE_REQUIRED));
        iter->submessage_index = (pb_size_t)(iter->submessage_index + PB_LTYPE_IS_SUBMSG(prev_type));
    }
}

bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *message)
{
    memset(iter, 0, sizeof(*iter));

    iter->descriptor = desc;
    iter->message = message;

    return load_descriptor_values(iter);
}

bool pb_field_iter_begin_extension(pb_field_iter_t *iter, pb_extension_t *extension)
{
    const pb_msgdesc_t *msg = (const pb_msgdesc_t*)extension->type->arg;
    bool status;

    uint32_t word0 = PB_PROGMEM_READU32(msg->field_info[0]);
    if (PB_ATYPE(word0 >> 8) == PB_ATYPE_POINTER)
    {
        /* For pointer extensions, the pointer is stored directly
         * in the extension structure. This avoids having an extra
         * indirection. */
        status = pb_field_iter_begin(iter, msg, &extension->dest);
    }
    else
    {
        status = pb_field_iter_begin(iter, msg, extension->dest);
    }

    iter->pSize = &extension->found;
    return status;
}

bool pb_field_iter_next(pb_field_iter_t *iter)
{
    advance_iterator(iter);
    (void)load_descriptor_values(iter);
    return iter->index != 0;
}

bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag)
{
    if (iter->tag == tag)
    {
        return true; /* Nothing to do, correct field already. */
    }
    else if (tag > iter->descriptor->largest_tag)
    {
        return false;
    }
    else
    {
        pb_size_t start = iter->index;
        uint32_t fieldinfo;

        if (tag < iter->tag)
        {
            /* Fields are in tag number order, so we know that tag is between
             * 0 and our start position. Setting index to the end forces the
             * advance_iterator() call below to restart from the beginning. */
            iter->index = iter->descriptor->field_count;
        }

        do
        {
            /* Advance iterator but don't load values yet */
            advance_iterator(iter);

            /* Do fast check for tag number match */
            fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);

            if (((fieldinfo >> 2) & 0x3F) == (tag & 0x3F))
            {
                /* Good candidate, check further */
                (void)load_descriptor_values(iter);

                if (iter->tag == tag &&
                    PB_LTYPE(iter->type) != PB_LTYPE_EXTENSION)
                {
                    /* Found it */
                    return true;
                }
            }
        } while (iter->index != start);

        /* Searched all the way back to start, and found nothing. */
        (void)load_descriptor_values(iter);
        return false;
    }
}

bool pb_field_iter_find_extension(pb_field_iter_t *iter)
{
    if (PB_LTYPE(iter->type) == PB_LTYPE_EXTENSION)
    {
        return true;
    }
    else
    {
        pb_size_t start = iter->index;
        uint32_t fieldinfo;

        do
        {
            /* Advance iterator but don't load values yet */
            advance_iterator(iter);

            /* Do fast check for field type */
            fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);

            if (PB_LTYPE((fieldinfo >> 8) & 0xFF) == PB_LTYPE_EXTENSION)
            {
                return load_descriptor_values(iter);
            }
        } while (iter->index != start);

        /* Searched all the way back to start, and found nothing. */
        (void)load_descriptor_values(iter);
        return false;
    }
}

static void *pb_const_cast(const void *p)
{
    /* Note: this casts away const, in order to use the common field iterator
     * logic for both encoding and decoding. The cast is done using a union
     * to avoid spurious compiler warnings. */
    union {
        void *p1;
        const void *p2;
    } t;
    t.p2 = p;
    return t.p1;
}

bool pb_field_iter_begin_const(pb_field_iter_t *iter, const pb_msgdesc_t *desc, const void *message)
{
    return pb_field_iter_begin(iter, desc, pb_const_cast(message));
}

bool pb_field_iter_begin_extension_const(pb_field_iter_t *iter, const pb_extension_t *extension)
{
    return pb_field_iter_begin_extension(iter, (pb_extension_t*)pb_const_cast(extension));
}

bool pb_default_field_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field)
{
    if (field->data_size == sizeof(pb_callback_t))
    {
        pb_callback_t *pCallback = (pb_callback_t*)field->pData;

        if (pCallback != NULL)
        {
            if (istream != NULL && pCallback->funcs.decode != NULL)
            {
                return pCallback->funcs.decode(istream, field, &pCallback->arg);
            }

            if (ostream != NULL && pCallback->funcs.encode != NULL)
            {
                return pCallback->funcs.encode(ostream, field, &pCallback->arg);
            }
        }
    }

    return true; /* Success, but didn't do anything */
}

#ifdef PB_VALIDATE_UTF8

/* This function checks whether a string is valid UTF-8 text.
 *
 * Algorithm is adapted from https://www.cl.cam.ac.uk/~mgk25/ucs/utf8_check.c
 * Original copyright: Markus Kuhn <http://www.cl.cam.ac.uk/~mgk25/> 2005-03-30
 * Licensed under "Short code license", which allows use under the MIT license
 * or any license compatible with it.
 */

bool pb_validate_utf8(const char *str)
{
    const pb_byte_t *s = (const pb_byte_t*)str;
    while (*s)
    {
        if (*s < 0x80)
        {
            /* 0xxxxxxx */
            s++;
        }
        else if ((s[0] & 0xe0) == 0xc0)
        {
            /* 110XXXXx 10xxxxxx */
            if ((s[1] & 0xc0) != 0x80 ||
                (s[0] & 0xfe) == 0xc0) /* overlong? */
                return false;
            else
                s += 2;
        }
        else if ((s[0] & 0xf0) == 0xe0)
        {
            /* 1110XXXX 10Xxxxxx 10xxxxxx */
            if ((s[1] & 0xc0) != 0x80 ||
                (s[2] & 0xc0) != 0x80 ||
                (s[0] == 0xe0 && (s[1] & 0xe0) == 0x80) || /* overlong? */
                (s[0] == 0xed && (s[1] & 0xe0) == 0xa0) || /* surrogate? */
                (s[0] == 0xef && s[1] == 0xbf &&
                 (s[2] & 0xfe) == 0xbe)) /* U+FFFE or U+FFFF? */
                return false;
            else
                s += 3;
        }
        else if ((s[0] & 0xf8) == 0xf0)
        {
            /* 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx */
            if ((s[1] & 0xc0) != 0x80 ||
                (s[2] & 0xc0) != 0x80 ||
                (s[3] & 0xc0) != 0x80 ||
                (s[0] == 0xf0 && (s[1] & 0xf0) == 0x80) || /* overlong? */
                (s[0] == 0xf4 && s[1] > 0x8f) || s[0] > 0xf4) /* > U+10FFFF? */
                return false;
            else
                s += 4;
        }
        else
        {
            return false;
        }
    }

    return true;
}

#endif
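
/* A small usage sketch (hypothetical wrapper, only meaningful when
 * PB_VALIDATE_UTF8 is defined): pb_validate_utf8() can also be called
 * directly to pre-check text before handing it to the encoder. */
#ifdef PB_VALIDATE_UTF8
static bool utf8_ok(const char *candidate)
{
    /* Rejects overlong encodings, surrogates, U+FFFE/U+FFFF and stray
     * bytes, exactly as the checks above do. */
    return pb_validate_utf8(candidate);
}
#endif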

@ -0,0 +1,49 @@
/* pb_common.h: Common support functions for pb_encode.c and pb_decode.c.
 * These functions are rarely needed by applications directly.
 */

#ifndef PB_COMMON_H_INCLUDED
#define PB_COMMON_H_INCLUDED

#include "nanopb/pb.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Initialize the field iterator structure to the beginning.
 * Returns false if the message type is empty. */
bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *message);

/* Get a field iterator for an extension field. */
bool pb_field_iter_begin_extension(pb_field_iter_t *iter, pb_extension_t *extension);

/* Same as pb_field_iter_begin(), but for a const message pointer.
 * Note that the pointers in pb_field_iter_t will be non-const but shouldn't
 * be written to when using these functions. */
bool pb_field_iter_begin_const(pb_field_iter_t *iter, const pb_msgdesc_t *desc, const void *message);
bool pb_field_iter_begin_extension_const(pb_field_iter_t *iter, const pb_extension_t *extension);

/* Advance the iterator to the next field.
 * Returns false when the iterator wraps back to the first field. */
bool pb_field_iter_next(pb_field_iter_t *iter);

/* Advance the iterator until it points at a field with the given tag.
 * Returns false if no such field exists. */
bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag);

/* Find a field with type PB_LTYPE_EXTENSION, or return false if not found.
 * There can be only one extension range field per message. */
bool pb_field_iter_find_extension(pb_field_iter_t *iter);

#ifdef PB_VALIDATE_UTF8
/* Validate a UTF-8 text string */
bool pb_validate_utf8(const char *s);
#endif

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
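
/* A minimal sketch of the iterator API above (not upstream code; the
 * descriptor and message would come from hypothetical generated
 * definitions): */
#include <stdio.h>

static void dump_field_tags(const pb_msgdesc_t *desc, void *message)
{
    pb_field_iter_t iter;
    if (!pb_field_iter_begin(&iter, desc, message))
        return; /* message type has no fields */
    do {
        printf("tag %u, data_size %u\n",
               (unsigned)iter.tag, (unsigned)iter.data_size);
    } while (pb_field_iter_next(&iter)); /* false once it wraps to the start */
}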

File diff suppressed because it is too large

@ -0,0 +1,193 @@
/* pb_decode.h: Functions to decode protocol buffers. Depends on pb_decode.c.
 * The main function is pb_decode. You also need an input stream, and the
 * field descriptions created by nanopb_generator.py.
 */

#ifndef PB_DECODE_H_INCLUDED
#define PB_DECODE_H_INCLUDED

#include "nanopb/pb.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Structure for defining custom input streams. You will need to provide
 * a callback function to read the bytes from your storage, which can be
 * for example a file or a network socket.
 *
 * The callback must conform to these rules:
 *
 * 1) Return false on IO errors. This will cause decoding to abort.
 * 2) You can use state to store your own data (e.g. a buffer pointer),
 *    and rely on pb_read to verify that nobody reads past bytes_left.
 * 3) Your callback may be used with substreams, in which case bytes_left
 *    is different than from the main stream. Don't use bytes_left to compute
 *    any pointers.
 */
struct pb_istream_s
{
#ifdef PB_BUFFER_ONLY
    /* Callback pointer is not used in buffer-only configuration.
     * Having an int pointer here allows binary compatibility but
     * gives an error if someone tries to assign a callback function.
     */
    int *callback;
#else
    bool (*callback)(pb_istream_t *stream, pb_byte_t *buf, size_t count);
#endif

    void *state; /* Free field for use by callback implementation */
    size_t bytes_left;

#ifndef PB_NO_ERRMSG
    const char *errmsg;
#endif
};

#ifndef PB_NO_ERRMSG
#define PB_ISTREAM_EMPTY {0,0,0,0}
#else
#define PB_ISTREAM_EMPTY {0,0,0}
#endif
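
/* A sketch of a custom stream following the rules above, reading from a
 * FILE* (not upstream code; assumes the default, non-PB_BUFFER_ONLY
 * configuration): */
#include <stdio.h>

static bool file_read_cb(pb_istream_t *stream, pb_byte_t *buf, size_t count)
{
    FILE *f = (FILE*)stream->state;
    /* Rule 1: return false on an I/O error or short read; decoding aborts. */
    return fread(buf, 1, count, f) == count;
}

static pb_istream_t pb_istream_from_file(FILE *f, size_t msglen)
{
    pb_istream_t stream;
    stream.callback = &file_read_cb;
    stream.state = f;           /* rule 2: private data for the callback */
    stream.bytes_left = msglen;
#ifndef PB_NO_ERRMSG
    stream.errmsg = NULL;
#endif
    return stream;
}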

/***************************
 * Main decoding functions *
 ***************************/

/* Decode a single protocol buffers message from input stream into a C structure.
 * Returns true on success, false on any failure.
 * The actual struct pointed to by dest must match the description in fields.
 * Callback fields of the destination structure must be initialized by the caller.
 * All other fields will be initialized by this function.
 *
 * Example usage:
 *    MyMessage msg = {};
 *    uint8_t buffer[64];
 *    pb_istream_t stream;
 *
 *    // ... read some data into buffer ...
 *
 *    stream = pb_istream_from_buffer(buffer, count);
 *    pb_decode(&stream, MyMessage_fields, &msg);
 */
bool pb_decode(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct);

/* Extended version of pb_decode, with several options to control
 * the decoding process:
 *
 * PB_DECODE_NOINIT:         Do not initialize the fields to default values.
 *                           This is slightly faster if you do not need the default
 *                           values and instead initialize the structure to 0 using
 *                           e.g. memset(). This can also be used for merging two
 *                           messages, i.e. combining already existing data with new
 *                           values.
 *
 * PB_DECODE_DELIMITED:      Input message starts with the message size as varint.
 *                           Corresponds to parseDelimitedFrom() in Google's
 *                           protobuf API.
 *
 * PB_DECODE_NULLTERMINATED: Stop reading when the field tag is read as 0. This allows
 *                           reading null-terminated messages.
 *                           NOTE: Until nanopb-0.4.0, pb_decode() also allowed
 *                           null termination. This behaviour is not supported in
 *                           most other protobuf implementations, so PB_DECODE_DELIMITED
 *                           is a better option for compatibility.
 *
 * Multiple flags can be combined with bitwise or (| operator)
 */
#define PB_DECODE_NOINIT          0x01U
#define PB_DECODE_DELIMITED       0x02U
#define PB_DECODE_NULLTERMINATED  0x04U
bool pb_decode_ex(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct, unsigned int flags);

/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
#define pb_decode_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NOINIT)
#define pb_decode_delimited(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED)
#define pb_decode_delimited_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED | PB_DECODE_NOINIT)
#define pb_decode_nullterminated(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NULLTERMINATED)
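
/* For example, a delimited message written by writeDelimitedTo() could be
 * decoded like this (a sketch; MyMessage and MyMessage_fields stand in for
 * hypothetical generated names): */
static bool read_delimited(const pb_byte_t *buffer, size_t buffer_len)
{
    MyMessage msg = MyMessage_init_zero;
    pb_istream_t stream = pb_istream_from_buffer(buffer, buffer_len);
    if (!pb_decode_ex(&stream, MyMessage_fields, &msg, PB_DECODE_DELIMITED))
        return false; /* PB_GET_ERROR(&stream) describes the failure */
    return true;
}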

/* Release any allocated pointer fields. If you use dynamic allocation, you should
 * call this for any successfully decoded message when you are done with it. If
 * pb_decode() returns with an error, the message is already released.
 */
void pb_release(const pb_msgdesc_t *fields, void *dest_struct);

/**************************************
 * Functions for manipulating streams *
 **************************************/

/* Create an input stream for reading from a memory buffer.
 *
 * msglen should be the actual length of the message, not the full size of the
 * allocated buffer.
 *
 * Alternatively, you can use a custom stream that reads directly from e.g.
 * a file or a network socket.
 */
pb_istream_t pb_istream_from_buffer(const pb_byte_t *buf, size_t msglen);

/* Function to read from a pb_istream_t. You can use this if you need to
 * read some custom header data, or to read data in field callbacks.
 */
bool pb_read(pb_istream_t *stream, pb_byte_t *buf, size_t count);


/************************************************
 * Helper functions for writing field callbacks *
 ************************************************/

/* Decode the tag for the next field in the stream. Gives the wire type and
 * field tag. At end of the message, returns false and sets eof to true. */
bool pb_decode_tag(pb_istream_t *stream, pb_wire_type_t *wire_type, uint32_t *tag, bool *eof);

/* Skip the field payload data, given the wire type. */
bool pb_skip_field(pb_istream_t *stream, pb_wire_type_t wire_type);

/* Decode an integer in the varint format. This works for enum, int32,
 * int64, uint32 and uint64 field types. */
#ifndef PB_WITHOUT_64BIT
bool pb_decode_varint(pb_istream_t *stream, uint64_t *dest);
#else
#define pb_decode_varint pb_decode_varint32
#endif

/* Decode an integer in the varint format. This works for enum, int32,
 * and uint32 field types. */
bool pb_decode_varint32(pb_istream_t *stream, uint32_t *dest);

/* Decode a bool value in varint format. */
bool pb_decode_bool(pb_istream_t *stream, bool *dest);

/* Decode an integer in the zig-zagged svarint format. This works for sint32
 * and sint64. */
#ifndef PB_WITHOUT_64BIT
bool pb_decode_svarint(pb_istream_t *stream, int64_t *dest);
#else
bool pb_decode_svarint(pb_istream_t *stream, int32_t *dest);
#endif

/* Decode a fixed32, sfixed32 or float value. You need to pass a pointer to
 * a 4-byte wide C variable. */
bool pb_decode_fixed32(pb_istream_t *stream, void *dest);

#ifndef PB_WITHOUT_64BIT
/* Decode a fixed64, sfixed64 or double value. You need to pass a pointer to
 * an 8-byte wide C variable. */
bool pb_decode_fixed64(pb_istream_t *stream, void *dest);
#endif

#ifdef PB_CONVERT_DOUBLE_FLOAT
/* Decode a double value into a float variable. */
bool pb_decode_double_as_float(pb_istream_t *stream, float *dest);
#endif

/* Make a limited-length substream for reading a PB_WT_STRING field. */
bool pb_make_string_substream(pb_istream_t *stream, pb_istream_t *substream);
bool pb_close_string_substream(pb_istream_t *stream, pb_istream_t *substream);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
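
/* Putting the helpers together, a sketch (not upstream code) that scans a
 * raw message, counting top-level fields while skipping their payloads: */
static bool count_top_level_fields(pb_istream_t *stream, size_t *count)
{
    *count = 0;
    while (stream->bytes_left > 0)
    {
        uint32_t tag;
        pb_wire_type_t wire_type;
        bool eof;
        if (!pb_decode_tag(stream, &wire_type, &tag, &eof))
            return eof; /* false with eof == true is a normal end of message */
        if (!pb_skip_field(stream, wire_type))
            return false;
        (*count)++;
    }
    return true;
}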

File diff suppressed because it is too large

@ -0,0 +1,185 @@
/* pb_encode.h: Functions to encode protocol buffers. Depends on pb_encode.c.
 * The main function is pb_encode. You also need an output stream, and the
 * field descriptions created by nanopb_generator.py.
 */

#ifndef PB_ENCODE_H_INCLUDED
#define PB_ENCODE_H_INCLUDED

#include "nanopb/pb.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Structure for defining custom output streams. You will need to provide
 * a callback function to write the bytes to your storage, which can be
 * for example a file or a network socket.
 *
 * The callback must conform to these rules:
 *
 * 1) Return false on IO errors. This will cause encoding to abort.
 * 2) You can use state to store your own data (e.g. a buffer pointer).
 * 3) pb_write will update bytes_written after your callback runs.
 * 4) Substreams will modify max_size and bytes_written. Don't use them
 *    to calculate any pointers.
 */
struct pb_ostream_s
{
#ifdef PB_BUFFER_ONLY
    /* Callback pointer is not used in buffer-only configuration.
     * Having an int pointer here allows binary compatibility but
     * gives an error if someone tries to assign a callback function.
     * Also, a NULL pointer marks a 'sizing stream' that does not
     * write anything.
     */
    const int *callback;
#else
    bool (*callback)(pb_ostream_t *stream, const pb_byte_t *buf, size_t count);
#endif
    void *state;          /* Free field for use by callback implementation. */
    size_t max_size;      /* Limit on the number of output bytes written (or use SIZE_MAX). */
    size_t bytes_written; /* Number of bytes written so far. */

#ifndef PB_NO_ERRMSG
    const char *errmsg;
#endif
};
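
/* A sketch of a custom output stream following the rules above, writing to
 * a FILE* (not upstream code; assumes the default, non-PB_BUFFER_ONLY
 * configuration): */
#include <stdint.h>
#include <stdio.h>

static bool file_write_cb(pb_ostream_t *stream, const pb_byte_t *buf, size_t count)
{
    FILE *f = (FILE*)stream->state;
    /* Rule 1: return false on an I/O error; encoding aborts. */
    return fwrite(buf, 1, count, f) == count;
}

static pb_ostream_t pb_ostream_from_file(FILE *f)
{
    pb_ostream_t stream;
    stream.callback = &file_write_cb;
    stream.state = f;
    stream.max_size = SIZE_MAX; /* no output limit */
    stream.bytes_written = 0;
#ifndef PB_NO_ERRMSG
    stream.errmsg = NULL;
#endif
    return stream;
}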

/***************************
 * Main encoding functions *
 ***************************/

/* Encode a single protocol buffers message from a C structure into a stream.
 * Returns true on success, false on any failure.
 * The actual struct pointed to by src_struct must match the description in fields.
 * All required fields in the struct are assumed to have been filled in.
 *
 * Example usage:
 *    MyMessage msg = {};
 *    uint8_t buffer[64];
 *    pb_ostream_t stream;
 *
 *    msg.field1 = 42;
 *    stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
 *    pb_encode(&stream, MyMessage_fields, &msg);
 */
bool pb_encode(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct);

/* Extended version of pb_encode, with several options to control the
 * encoding process:
 *
 * PB_ENCODE_DELIMITED:      Prepend the length of the message as a varint.
 *                           Corresponds to writeDelimitedTo() in Google's
 *                           protobuf API.
 *
 * PB_ENCODE_NULLTERMINATED: Append a null byte to the message for termination.
 *                           NOTE: This behaviour is not supported in most other
 *                           protobuf implementations, so PB_ENCODE_DELIMITED
 *                           is a better option for compatibility.
 */
#define PB_ENCODE_DELIMITED       0x02U
#define PB_ENCODE_NULLTERMINATED  0x04U
bool pb_encode_ex(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct, unsigned int flags);

/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
#define pb_encode_delimited(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_DELIMITED)
#define pb_encode_nullterminated(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_NULLTERMINATED)

/* Encode the message to get the size of the encoded data, but do not store
 * the data. */
bool pb_get_encoded_size(size_t *size, const pb_msgdesc_t *fields, const void *src_struct);
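
/* A common pattern is to size the message first and then encode into an
 * exactly-sized heap buffer; a sketch (MyMessage/MyMessage_fields are
 * hypothetical generated names; pb_ostream_from_buffer is declared below): */
#include <stdlib.h>

static pb_byte_t *encode_to_heap(const MyMessage *msg, size_t *out_len)
{
    size_t size;
    pb_byte_t *buf;
    pb_ostream_t stream;

    if (!pb_get_encoded_size(&size, MyMessage_fields, msg))
        return NULL;
    if ((buf = (pb_byte_t*)malloc(size)) == NULL)
        return NULL;
    stream = pb_ostream_from_buffer(buf, size);
    if (!pb_encode(&stream, MyMessage_fields, msg))
    {
        free(buf);
        return NULL;
    }
    *out_len = stream.bytes_written;
    return buf;
}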

/**************************************
 * Functions for manipulating streams *
 **************************************/

/* Create an output stream for writing into a memory buffer.
 * The number of bytes written can be found in stream.bytes_written after
 * encoding the message.
 *
 * Alternatively, you can use a custom stream that writes directly to e.g.
 * a file or a network socket.
 */
pb_ostream_t pb_ostream_from_buffer(pb_byte_t *buf, size_t bufsize);

/* Pseudo-stream for measuring the size of a message without actually storing
 * the encoded data.
 *
 * Example usage:
 *    MyMessage msg = {};
 *    pb_ostream_t stream = PB_OSTREAM_SIZING;
 *    pb_encode(&stream, MyMessage_fields, &msg);
 *    printf("Message size is %zu\n", stream.bytes_written);
 */
#ifndef PB_NO_ERRMSG
#define PB_OSTREAM_SIZING {0,0,0,0,0}
#else
#define PB_OSTREAM_SIZING {0,0,0,0}
#endif

/* Function to write into a pb_ostream_t stream. You can use this if you need
 * to append or prepend some custom headers to the message.
 */
bool pb_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count);


/************************************************
 * Helper functions for writing field callbacks *
 ************************************************/

/* Encode the field header based on the type and field number defined in the field
 * structure. Call this from the callback before writing out field contents. */
bool pb_encode_tag_for_field(pb_ostream_t *stream, const pb_field_iter_t *field);

/* Encode the field header by manually specifying the wire type. You need to use this
 * if you want to write out packed arrays from a callback field. */
bool pb_encode_tag(pb_ostream_t *stream, pb_wire_type_t wiretype, uint32_t field_number);

/* Encode an integer in the varint format.
 * This works for bool, enum, int32, int64, uint32 and uint64 field types. */
#ifndef PB_WITHOUT_64BIT
bool pb_encode_varint(pb_ostream_t *stream, uint64_t value);
#else
bool pb_encode_varint(pb_ostream_t *stream, uint32_t value);
#endif

/* Encode an integer in the zig-zagged svarint format.
 * This works for sint32 and sint64. */
#ifndef PB_WITHOUT_64BIT
bool pb_encode_svarint(pb_ostream_t *stream, int64_t value);
#else
bool pb_encode_svarint(pb_ostream_t *stream, int32_t value);
#endif

/* Encode a string or bytes type field. For strings, pass strlen(s) as size. */
bool pb_encode_string(pb_ostream_t *stream, const pb_byte_t *buffer, size_t size);

/* Encode a fixed32, sfixed32 or float value.
 * You need to pass a pointer to a 4-byte wide C variable. */
bool pb_encode_fixed32(pb_ostream_t *stream, const void *value);

#ifndef PB_WITHOUT_64BIT
/* Encode a fixed64, sfixed64 or double value.
 * You need to pass a pointer to an 8-byte wide C variable. */
bool pb_encode_fixed64(pb_ostream_t *stream, const void *value);
#endif

#ifdef PB_CONVERT_DOUBLE_FLOAT
/* Encode a float value so that it appears like a double in the encoded
 * message. */
bool pb_encode_float_as_double(pb_ostream_t *stream, float value);
#endif

/* Encode a submessage field.
 * You need to pass the pb_field_t array and a pointer to the struct, just like
 * with pb_encode(). This internally encodes the submessage twice, first to
 * calculate the message size and then to actually write it out.
 */
bool pb_encode_submessage(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
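
/* A sketch of a field callback built on pb_encode_tag_for_field() and
 * pb_encode_string(), emitting a NULL-terminated array of C strings as a
 * repeated string field (the list representation is a hypothetical
 * application choice, not upstream code): */
#include <string.h>

static bool encode_repeated_strings_cb(pb_ostream_t *stream, const pb_field_t *field, void * const *arg)
{
    const char * const *items = (const char * const *)*arg;
    for (; *items != NULL; items++)
    {
        if (!pb_encode_tag_for_field(stream, field))
            return false;
        if (!pb_encode_string(stream, (const pb_byte_t*)*items, strlen(*items)))
            return false;
    }
    return true;
}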

@ -0,0 +1,32 @@
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.4.8-dev */

#include "opentelemetry/common.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

PB_BIND(opentelemetry_proto_common_v1_AnyValue, opentelemetry_proto_common_v1_AnyValue, AUTO)


PB_BIND(opentelemetry_proto_common_v1_ArrayValue, opentelemetry_proto_common_v1_ArrayValue, AUTO)


PB_BIND(opentelemetry_proto_common_v1_KeyValueList, opentelemetry_proto_common_v1_KeyValueList, AUTO)


PB_BIND(opentelemetry_proto_common_v1_KeyValue, opentelemetry_proto_common_v1_KeyValue, AUTO)


PB_BIND(opentelemetry_proto_common_v1_InstrumentationScope, opentelemetry_proto_common_v1_InstrumentationScope, AUTO)



#ifndef PB_CONVERT_DOUBLE_FLOAT
/* On some platforms (such as AVR), double is really float.
 * To be able to encode/decode double on these platforms, you need
 * to define PB_CONVERT_DOUBLE_FLOAT in pb.h or on the compiler command line.
 */
PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
#endif

@ -0,0 +1,170 @@
/* Automatically generated nanopb header */
/* Generated by nanopb-0.4.8-dev */

#ifndef PB_OPENTELEMETRY_PROTO_COMMON_V1_OPENTELEMETRY_PROTO_COMMON_V1_COMMON_PB_H_INCLUDED
#define PB_OPENTELEMETRY_PROTO_COMMON_V1_OPENTELEMETRY_PROTO_COMMON_V1_COMMON_PB_H_INCLUDED
#include <nanopb/pb.h>

#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

/* Struct definitions */
/* ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
 since oneof in AnyValue does not allow repeated fields. */
typedef struct _opentelemetry_proto_common_v1_ArrayValue {
    /* Array of values. The array may be empty (contain 0 elements). */
    pb_callback_t values;
} opentelemetry_proto_common_v1_ArrayValue;

/* KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
 since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
 a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
 avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
 are semantically equivalent. */
typedef struct _opentelemetry_proto_common_v1_KeyValueList {
    /* A collection of key/value pairs. The list may be empty (may
 contain 0 elements).
 The keys MUST be unique (it is not allowed to have more than one
 value with the same key). */
    pb_callback_t values;
} opentelemetry_proto_common_v1_KeyValueList;

/* AnyValue is used to represent any type of attribute value. AnyValue may contain a
 primitive value such as a string or integer or it may contain an arbitrary nested
 object containing arrays, key-value lists and primitives. */
typedef struct _opentelemetry_proto_common_v1_AnyValue {
    pb_size_t which_value;
    union {
        pb_callback_t string_value;
        bool bool_value;
        int64_t int_value;
        double double_value;
        opentelemetry_proto_common_v1_ArrayValue array_value;
        opentelemetry_proto_common_v1_KeyValueList kvlist_value;
        pb_callback_t bytes_value;
    } value;
} opentelemetry_proto_common_v1_AnyValue;

/* KeyValue is a key-value pair that is used to store Span attributes, Link
 attributes, etc. */
typedef struct _opentelemetry_proto_common_v1_KeyValue {
    pb_callback_t key;
    bool has_value;
    opentelemetry_proto_common_v1_AnyValue value;
} opentelemetry_proto_common_v1_KeyValue;

/* InstrumentationScope is a message representing the instrumentation scope information
 such as the fully qualified name and version. */
typedef struct _opentelemetry_proto_common_v1_InstrumentationScope {
    /* An empty instrumentation scope name means the name is unknown. */
    pb_callback_t name;
    pb_callback_t version;
    /* Additional attributes that describe the scope. [Optional].
 Attribute keys MUST be unique (it is not allowed to have more than one
 attribute with the same key). */
    pb_callback_t attributes;
    uint32_t dropped_attributes_count;
} opentelemetry_proto_common_v1_InstrumentationScope;


#ifdef __cplusplus
extern "C" {
#endif

/* Initializer values for message structs */
#define opentelemetry_proto_common_v1_AnyValue_init_default {0, {{{NULL}, NULL}}}
#define opentelemetry_proto_common_v1_ArrayValue_init_default {{{NULL}, NULL}}
#define opentelemetry_proto_common_v1_KeyValueList_init_default {{{NULL}, NULL}}
#define opentelemetry_proto_common_v1_KeyValue_init_default {{{NULL}, NULL}, false, opentelemetry_proto_common_v1_AnyValue_init_default}
#define opentelemetry_proto_common_v1_InstrumentationScope_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0}
#define opentelemetry_proto_common_v1_AnyValue_init_zero {0, {{{NULL}, NULL}}}
#define opentelemetry_proto_common_v1_ArrayValue_init_zero {{{NULL}, NULL}}
#define opentelemetry_proto_common_v1_KeyValueList_init_zero {{{NULL}, NULL}}
#define opentelemetry_proto_common_v1_KeyValue_init_zero {{{NULL}, NULL}, false, opentelemetry_proto_common_v1_AnyValue_init_zero}
#define opentelemetry_proto_common_v1_InstrumentationScope_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0}

/* Field tags (for use in manual encoding/decoding) */
#define opentelemetry_proto_common_v1_ArrayValue_values_tag 1
#define opentelemetry_proto_common_v1_KeyValueList_values_tag 1
#define opentelemetry_proto_common_v1_AnyValue_string_value_tag 1
#define opentelemetry_proto_common_v1_AnyValue_bool_value_tag 2
#define opentelemetry_proto_common_v1_AnyValue_int_value_tag 3
#define opentelemetry_proto_common_v1_AnyValue_double_value_tag 4
#define opentelemetry_proto_common_v1_AnyValue_array_value_tag 5
#define opentelemetry_proto_common_v1_AnyValue_kvlist_value_tag 6
#define opentelemetry_proto_common_v1_AnyValue_bytes_value_tag 7
#define opentelemetry_proto_common_v1_KeyValue_key_tag 1
#define opentelemetry_proto_common_v1_KeyValue_value_tag 2
#define opentelemetry_proto_common_v1_InstrumentationScope_name_tag 1
#define opentelemetry_proto_common_v1_InstrumentationScope_version_tag 2
#define opentelemetry_proto_common_v1_InstrumentationScope_attributes_tag 3
#define opentelemetry_proto_common_v1_InstrumentationScope_dropped_attributes_count_tag 4

/* Struct field encoding specification for nanopb */
#define opentelemetry_proto_common_v1_AnyValue_FIELDLIST(X, a) \
X(a, CALLBACK, ONEOF, STRING, (value,string_value,value.string_value), 1) \
X(a, STATIC, ONEOF, BOOL, (value,bool_value,value.bool_value), 2) \
X(a, STATIC, ONEOF, INT64, (value,int_value,value.int_value), 3) \
X(a, STATIC, ONEOF, DOUBLE, (value,double_value,value.double_value), 4) \
X(a, STATIC, ONEOF, MESSAGE, (value,array_value,value.array_value), 5) \
X(a, STATIC, ONEOF, MESSAGE, (value,kvlist_value,value.kvlist_value), 6) \
X(a, CALLBACK, ONEOF, BYTES, (value,bytes_value,value.bytes_value), 7)
#define opentelemetry_proto_common_v1_AnyValue_CALLBACK pb_default_field_callback
#define opentelemetry_proto_common_v1_AnyValue_DEFAULT NULL
#define opentelemetry_proto_common_v1_AnyValue_value_array_value_MSGTYPE opentelemetry_proto_common_v1_ArrayValue
#define opentelemetry_proto_common_v1_AnyValue_value_kvlist_value_MSGTYPE opentelemetry_proto_common_v1_KeyValueList

#define opentelemetry_proto_common_v1_ArrayValue_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, values, 1)
#define opentelemetry_proto_common_v1_ArrayValue_CALLBACK pb_default_field_callback
#define opentelemetry_proto_common_v1_ArrayValue_DEFAULT NULL
#define opentelemetry_proto_common_v1_ArrayValue_values_MSGTYPE opentelemetry_proto_common_v1_AnyValue

#define opentelemetry_proto_common_v1_KeyValueList_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, values, 1)
#define opentelemetry_proto_common_v1_KeyValueList_CALLBACK pb_default_field_callback
#define opentelemetry_proto_common_v1_KeyValueList_DEFAULT NULL
#define opentelemetry_proto_common_v1_KeyValueList_values_MSGTYPE opentelemetry_proto_common_v1_KeyValue

#define opentelemetry_proto_common_v1_KeyValue_FIELDLIST(X, a) \
X(a, CALLBACK, SINGULAR, STRING, key, 1) \
X(a, STATIC, OPTIONAL, MESSAGE, value, 2)
#define opentelemetry_proto_common_v1_KeyValue_CALLBACK pb_default_field_callback
#define opentelemetry_proto_common_v1_KeyValue_DEFAULT NULL
#define opentelemetry_proto_common_v1_KeyValue_value_MSGTYPE opentelemetry_proto_common_v1_AnyValue

#define opentelemetry_proto_common_v1_InstrumentationScope_FIELDLIST(X, a) \
X(a, CALLBACK, SINGULAR, STRING, name, 1) \
X(a, CALLBACK, SINGULAR, STRING, version, 2) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 3) \
X(a, STATIC, SINGULAR, UINT32, dropped_attributes_count, 4)
#define opentelemetry_proto_common_v1_InstrumentationScope_CALLBACK pb_default_field_callback
#define opentelemetry_proto_common_v1_InstrumentationScope_DEFAULT NULL
#define opentelemetry_proto_common_v1_InstrumentationScope_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

extern const pb_msgdesc_t opentelemetry_proto_common_v1_AnyValue_msg;
extern const pb_msgdesc_t opentelemetry_proto_common_v1_ArrayValue_msg;
extern const pb_msgdesc_t opentelemetry_proto_common_v1_KeyValueList_msg;
extern const pb_msgdesc_t opentelemetry_proto_common_v1_KeyValue_msg;
extern const pb_msgdesc_t opentelemetry_proto_common_v1_InstrumentationScope_msg;

/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
#define opentelemetry_proto_common_v1_AnyValue_fields &opentelemetry_proto_common_v1_AnyValue_msg
#define opentelemetry_proto_common_v1_ArrayValue_fields &opentelemetry_proto_common_v1_ArrayValue_msg
#define opentelemetry_proto_common_v1_KeyValueList_fields &opentelemetry_proto_common_v1_KeyValueList_msg
#define opentelemetry_proto_common_v1_KeyValue_fields &opentelemetry_proto_common_v1_KeyValue_msg
#define opentelemetry_proto_common_v1_InstrumentationScope_fields &opentelemetry_proto_common_v1_InstrumentationScope_msg

/* Maximum encoded size of messages (where known) */
/* opentelemetry_proto_common_v1_AnyValue_size depends on runtime parameters */
/* opentelemetry_proto_common_v1_ArrayValue_size depends on runtime parameters */
/* opentelemetry_proto_common_v1_KeyValueList_size depends on runtime parameters */
/* opentelemetry_proto_common_v1_KeyValue_size depends on runtime parameters */
/* opentelemetry_proto_common_v1_InstrumentationScope_size depends on runtime parameters */

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
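
/* A hedged usage sketch of these generated types (not upstream code):
 * filling a KeyValue with a static string key via a callback and a bool
 * AnyValue, then encoding it with nanopb. The attribute key "enabled" and
 * both function names are hypothetical. */
#include <string.h>
#include "nanopb/pb_encode.h"
#include "opentelemetry/common.pb.h"

static bool encode_cstring_cb(pb_ostream_t *stream, const pb_field_t *field, void * const *arg)
{
    const char *s = (const char *)*arg;
    return pb_encode_tag_for_field(stream, field) &&
           pb_encode_string(stream, (const pb_byte_t*)s, strlen(s));
}

static bool make_bool_attribute(pb_byte_t *buf, size_t bufsize, size_t *written)
{
    opentelemetry_proto_common_v1_KeyValue kv =
        opentelemetry_proto_common_v1_KeyValue_init_zero;
    pb_ostream_t stream = pb_ostream_from_buffer(buf, bufsize);

    kv.key.funcs.encode = &encode_cstring_cb;
    kv.key.arg = (void*)"enabled";
    kv.has_value = true;
    kv.value.which_value = opentelemetry_proto_common_v1_AnyValue_bool_value_tag;
    kv.value.value.bool_value = true;

    if (!pb_encode(&stream, opentelemetry_proto_common_v1_KeyValue_fields, &kv))
        return false;
    *written = stream.bytes_written;
    return true;
}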

@ -0,0 +1,2 @@
# Needed to generate callbacks for the data types within Metric, which aren't generated for oneof types by default
opentelemetry.proto.metrics.v1.Metric submsg_callback:true;

@ -0,0 +1,67 @@
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.4.8-dev */

#include "opentelemetry/metrics.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

PB_BIND(opentelemetry_proto_metrics_v1_MetricsData, opentelemetry_proto_metrics_v1_MetricsData, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_ResourceMetrics, opentelemetry_proto_metrics_v1_ResourceMetrics, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_ScopeMetrics, opentelemetry_proto_metrics_v1_ScopeMetrics, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_Metric, opentelemetry_proto_metrics_v1_Metric, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_Gauge, opentelemetry_proto_metrics_v1_Gauge, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_Sum, opentelemetry_proto_metrics_v1_Sum, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_Histogram, opentelemetry_proto_metrics_v1_Histogram, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogram, opentelemetry_proto_metrics_v1_ExponentialHistogram, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_Summary, opentelemetry_proto_metrics_v1_Summary, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_NumberDataPoint, opentelemetry_proto_metrics_v1_NumberDataPoint, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_HistogramDataPoint, opentelemetry_proto_metrics_v1_HistogramDataPoint, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_SummaryDataPoint, opentelemetry_proto_metrics_v1_SummaryDataPoint, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile, opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile, AUTO)


PB_BIND(opentelemetry_proto_metrics_v1_Exemplar, opentelemetry_proto_metrics_v1_Exemplar, AUTO)



#ifndef PB_CONVERT_DOUBLE_FLOAT
/* On some platforms (such as AVR), double is really float.
 * To be able to encode/decode double on these platforms, you need
 * to define PB_CONVERT_DOUBLE_FLOAT in pb.h or on the compiler command line.
 */
PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
#endif
|
@ -0,0 +1,966 @@
|
|||
/* Automatically generated nanopb header */
/* Generated by nanopb-0.4.8-dev */

#ifndef PB_OPENTELEMETRY_PROTO_METRICS_V1_OPENTELEMETRY_PROTO_METRICS_V1_METRICS_PB_H_INCLUDED
#define PB_OPENTELEMETRY_PROTO_METRICS_V1_OPENTELEMETRY_PROTO_METRICS_V1_METRICS_PB_H_INCLUDED
#include <nanopb/pb.h>
#include "opentelemetry/common.pb.h"
#include "opentelemetry/resource.pb.h"

#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

/* Enum definitions */
/* AggregationTemporality defines how a metric aggregator reports aggregated
 values. It describes how those values relate to the time interval over
 which they are aggregated. */
typedef enum _opentelemetry_proto_metrics_v1_AggregationTemporality {
    /* UNSPECIFIED is the default AggregationTemporality, it MUST not be used. */
    opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED = 0,
    /* DELTA is an AggregationTemporality for a metric aggregator which reports
 changes since last report time. Successive metrics contain aggregation of
 values from continuous and non-overlapping intervals.

 The values for a DELTA metric are based only on the time interval
 associated with one measurement cycle. There is no dependency on
 previous measurements like is the case for CUMULATIVE metrics.

 For example, consider a system measuring the number of requests that
 it receives and reports the sum of these requests every second as a
 DELTA metric:

   1. The system starts receiving at time=t_0.
   2. A request is received, the system measures 1 request.
   3. A request is received, the system measures 1 request.
   4. A request is received, the system measures 1 request.
   5. The 1 second collection cycle ends. A metric is exported for the
      number of requests received over the interval of time t_0 to
      t_0+1 with a value of 3.
   6. A request is received, the system measures 1 request.
   7. A request is received, the system measures 1 request.
   8. The 1 second collection cycle ends. A metric is exported for the
      number of requests received over the interval of time t_0+1 to
      t_0+2 with a value of 2. */
    opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA = 1,
    /* CUMULATIVE is an AggregationTemporality for a metric aggregator which
 reports changes since a fixed start time. This means that current values
 of a CUMULATIVE metric depend on all previous measurements since the
 start time. Because of this, the sender is required to retain this state
 in some form. If this state is lost or invalidated, the CUMULATIVE metric
 values MUST be reset and a new fixed start time following the last
 reported measurement time sent MUST be used.

 For example, consider a system measuring the number of requests that
 it receives and reports the sum of these requests every second as a
 CUMULATIVE metric:

   1. The system starts receiving at time=t_0.
   2. A request is received, the system measures 1 request.
   3. A request is received, the system measures 1 request.
   4. A request is received, the system measures 1 request.
   5. The 1 second collection cycle ends. A metric is exported for the
      number of requests received over the interval of time t_0 to
      t_0+1 with a value of 3.
   6. A request is received, the system measures 1 request.
   7. A request is received, the system measures 1 request.
   8. The 1 second collection cycle ends. A metric is exported for the
      number of requests received over the interval of time t_0 to
      t_0+2 with a value of 5.
   9. The system experiences a fault and loses state.
   10. The system recovers and resumes receiving at time=t_1.
   11. A request is received, the system measures 1 request.
   12. The 1 second collection cycle ends. A metric is exported for the
      number of requests received over the interval of time t_1 to
      t_0+1 with a value of 1.

 Note: Even though, when reporting changes since last report time, using
 CUMULATIVE is valid, it is not recommended. This may cause problems for
 systems that do not use start_time to determine when the aggregation
 value was reset (e.g. Prometheus). */
    opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE = 2
} opentelemetry_proto_metrics_v1_AggregationTemporality;

/* DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
 bit-field representing 32 distinct boolean flags. Each flag defined in this
 enum is a bit-mask. To test the presence of a single flag in the flags of
 a data point, for example, use an expression like:

   (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK */
typedef enum _opentelemetry_proto_metrics_v1_DataPointFlags {
    /* The zero value for the enum. Should not be used for comparisons.
 Instead use bitwise "and" with the appropriate mask as shown above. */
    opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE = 0,
    /* This DataPoint is valid but has no recorded value. This value
 SHOULD be used to reflect explicitly missing data in a series, as
 for an equivalent to the Prometheus "staleness marker". */
    opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1
} opentelemetry_proto_metrics_v1_DataPointFlags;
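/* Editor's note: a minimal usage sketch of the bit-mask test described in the
 * DataPointFlags comment above. Not part of the generated header; `point` is a
 * hypothetical NumberDataPoint populated elsewhere. Kept inside `#if 0` so the
 * header compiles unchanged. */
#if 0
bool no_recorded_value =
    (point.flags & opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) ==
    opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK;
#endif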

/* Struct definitions */
/* MetricsData represents the metrics data that can be stored in a persistent
 storage, OR can be embedded by other protocols that transfer OTLP metrics
 data but do not implement the OTLP protocol.

 The main difference between this message and collector protocol is that
 in this message there will not be any "control" or "metadata" specific to
 OTLP protocol.

 When new fields are added into this message, the OTLP request MUST be updated
 as well. */
typedef struct _opentelemetry_proto_metrics_v1_MetricsData {
    /* An array of ResourceMetrics.
 For data coming from a single resource this array will typically contain
 one element. Intermediary nodes that receive data from multiple origins
 typically batch the data before forwarding further and in that case this
 array will contain multiple elements. */
    pb_callback_t resource_metrics;
} opentelemetry_proto_metrics_v1_MetricsData;

/* A collection of ScopeMetrics from a Resource. */
typedef struct _opentelemetry_proto_metrics_v1_ResourceMetrics {
    /* The resource for the metrics in this message.
 If this field is not set then no resource info is known. */
    bool has_resource;
    opentelemetry_proto_resource_v1_Resource resource;
    /* A list of metrics that originate from a resource. */
    pb_callback_t scope_metrics;
    /* This schema_url applies to the data in the "resource" field. It does not apply
 to the data in the "scope_metrics" field which have their own schema_url field. */
    pb_callback_t schema_url;
} opentelemetry_proto_metrics_v1_ResourceMetrics;

/* A collection of Metrics produced by a Scope. */
typedef struct _opentelemetry_proto_metrics_v1_ScopeMetrics {
    /* The instrumentation scope information for the metrics in this message.
 Semantically when InstrumentationScope isn't set, it is equivalent with
 an empty instrumentation scope name (unknown). */
    bool has_scope;
    opentelemetry_proto_common_v1_InstrumentationScope scope;
    /* A list of metrics that originate from an instrumentation library. */
    pb_callback_t metrics;
    /* This schema_url applies to all metrics in the "metrics" field. */
    pb_callback_t schema_url;
} opentelemetry_proto_metrics_v1_ScopeMetrics;

/* Gauge represents the type of a scalar metric that always exports the
 "current value" for every data point. It should be used for an "unknown"
 aggregation.

 A Gauge does not support different aggregation temporalities. Given the
 aggregation is unknown, points cannot be combined using the same
 aggregation, regardless of aggregation temporalities. Therefore,
 AggregationTemporality is not included. Consequently, this also means
 "StartTimeUnixNano" is ignored for all data points. */
typedef struct _opentelemetry_proto_metrics_v1_Gauge {
    pb_callback_t data_points;
} opentelemetry_proto_metrics_v1_Gauge;

/* Sum represents the type of a scalar metric that is calculated as a sum of all
 reported measurements over a time interval. */
typedef struct _opentelemetry_proto_metrics_v1_Sum {
    pb_callback_t data_points;
    /* aggregation_temporality describes if the aggregator reports delta changes
 since last report time, or cumulative changes since a fixed start time. */
    opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
    /* If "true" means that the sum is monotonic. */
    bool is_monotonic;
} opentelemetry_proto_metrics_v1_Sum;

/* Histogram represents the type of a metric that is calculated by aggregating
 as a Histogram of all reported measurements over a time interval. */
typedef struct _opentelemetry_proto_metrics_v1_Histogram {
    pb_callback_t data_points;
    /* aggregation_temporality describes if the aggregator reports delta changes
 since last report time, or cumulative changes since a fixed start time. */
    opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
} opentelemetry_proto_metrics_v1_Histogram;

/* ExponentialHistogram represents the type of a metric that is calculated by aggregating
 as an ExponentialHistogram of all reported double measurements over a time interval. */
typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogram {
    pb_callback_t data_points;
    /* aggregation_temporality describes if the aggregator reports delta changes
 since last report time, or cumulative changes since a fixed start time. */
    opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
} opentelemetry_proto_metrics_v1_ExponentialHistogram;

/* Summary metric data are used to convey quantile summaries,
 a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
 and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
 data type. These data points cannot always be merged in a meaningful way.
 While they can be useful in some applications, histogram data points are
 recommended for new applications. */
typedef struct _opentelemetry_proto_metrics_v1_Summary {
    pb_callback_t data_points;
} opentelemetry_proto_metrics_v1_Summary;

/* Defines a Metric which has one or more timeseries. The following is a
 brief summary of the Metric data model. For more details, see:

   https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md

 The data model and relation between entities is shown in the
 diagram below. Here, "DataPoint" is the term used to refer to any
 one of the specific data point value types, and "points" is the term used
 to refer to any one of the lists of points contained in the Metric.

 - Metric is composed of metadata and data.
 - Metadata part contains a name, description, unit.
 - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
 - DataPoint contains timestamps, attributes, and one of the possible value type
   fields.

     Metric
  +------------+
  |name        |
  |description |
  |unit        |     +------------------------------------+
  |data        |---> |Gauge, Sum, Histogram, Summary, ... |
  +------------+     +------------------------------------+

    Data [One of Gauge, Sum, Histogram, Summary, ...]
  +-----------+
  |...        |  // Metadata about the Data.
  |points     |--+
  +-----------+  |
                 |      +---------------------------+
                 |      |DataPoint 1                |
                 v      |+------+------+   +------+ |
              +-----+   ||label |label |...|label | |
              |  1  |-->||value1|value2|...|valueN| |
              +-----+   |+------+------+   +------+ |
              |  .  |   |+-----+                    |
              |  .  |   ||value|                    |
              |  .  |   |+-----+                    |
              |  .  |   +---------------------------+
              |  .  |                   .
              |  .  |                   .
              |  .  |                   .
              |  .  |   +---------------------------+
              |  .  |   |DataPoint M                |
              +-----+   |+------+------+   +------+ |
              |  M  |-->||label |label |...|label | |
              +-----+   ||value1|value2|...|valueN| |
                        |+------+------+   +------+ |
                        |+-----+                    |
                        ||value|                    |
                        |+-----+                    |
                        +---------------------------+

 Each distinct type of DataPoint represents the output of a specific
 aggregation function, the result of applying the DataPoint's
 associated function to one or more measurements.

 All DataPoint types have three common fields:
 - Attributes includes key-value pairs associated with the data point
 - TimeUnixNano is required, set to the end time of the aggregation
 - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
   having an AggregationTemporality field, as discussed below.

 Both TimeUnixNano and StartTimeUnixNano values are expressed as
 UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.

 # TimeUnixNano

 This field is required, having consistent interpretation across
 DataPoint types. TimeUnixNano is the moment corresponding to when
 the data point's aggregate value was captured.

 Data points with the 0 value for TimeUnixNano SHOULD be rejected
 by consumers.

 # StartTimeUnixNano

 StartTimeUnixNano in general allows detecting when a sequence of
 observations is unbroken. This field indicates to consumers the
 start time for points with cumulative and delta
 AggregationTemporality, and it should be included whenever possible
 to support correct rate calculation. Although it may be omitted
 when the start time is truly unknown, setting StartTimeUnixNano is
 strongly encouraged. */
typedef struct _opentelemetry_proto_metrics_v1_Metric {
    /* name of the metric, including its DNS name prefix. It must be unique. */
    pb_callback_t name;
    /* description of the metric, which can be used in documentation. */
    pb_callback_t description;
    /* unit in which the metric value is reported. Follows the format
 described by http://unitsofmeasure.org/ucum.html. */
    pb_callback_t unit;
    pb_callback_t cb_data;
    pb_size_t which_data;
    union {
        opentelemetry_proto_metrics_v1_Gauge gauge;
        opentelemetry_proto_metrics_v1_Sum sum;
        opentelemetry_proto_metrics_v1_Histogram histogram;
        opentelemetry_proto_metrics_v1_ExponentialHistogram exponential_histogram;
        opentelemetry_proto_metrics_v1_Summary summary;
    } data;
} opentelemetry_proto_metrics_v1_Metric;
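/* Editor's note: a minimal sketch (not part of the generated header) of how
 * this nanopb oneof is populated: `which_data` selects the active union member
 * using the field-tag macros defined later in this file. Kept inside `#if 0`
 * so the header compiles unchanged. */
#if 0
opentelemetry_proto_metrics_v1_Metric metric = opentelemetry_proto_metrics_v1_Metric_init_zero;
metric.which_data = opentelemetry_proto_metrics_v1_Metric_sum_tag; /* tag 7: data.sum is active */
metric.data.sum.aggregation_temporality =
    opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE;
metric.data.sum.is_monotonic = true;
#endif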

/* NumberDataPoint is a single data point in a timeseries that describes the
 time-varying scalar value of a metric. */
typedef struct _opentelemetry_proto_metrics_v1_NumberDataPoint {
    /* StartTimeUnixNano is optional but strongly encouraged, see
 the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t start_time_unix_nano;
    /* TimeUnixNano is required, see the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    pb_size_t which_value;
    union {
        double as_double;
        int64_t as_int;
    } value;
    /* (Optional) List of exemplars collected from
 measurements that were used to form the data point. */
    pb_callback_t exemplars;
    /* The set of key/value pairs that uniquely identify the timeseries from
 where this point belongs. The list may be empty (may contain 0 elements).
 Attribute keys MUST be unique (it is not allowed to have more than one
 attribute with the same key). */
    pb_callback_t attributes;
    /* Flags that apply to this specific data point. See DataPointFlags
 for the available flags and their meaning. */
    uint32_t flags;
} opentelemetry_proto_metrics_v1_NumberDataPoint;

/* HistogramDataPoint is a single data point in a timeseries that describes the
 time-varying values of a Histogram. A Histogram contains summary statistics
 for a population of values, it may optionally contain the distribution of
 those values across a set of buckets.

 If the histogram contains the distribution of values, then both
 "explicit_bounds" and "bucket_counts" fields must be defined.
 If the histogram does not contain the distribution of values, then both
 "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
 "sum" are known. */
typedef struct _opentelemetry_proto_metrics_v1_HistogramDataPoint {
    /* StartTimeUnixNano is optional but strongly encouraged, see
 the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t start_time_unix_nano;
    /* TimeUnixNano is required, see the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    /* count is the number of values in the population. Must be non-negative. This
 value must be equal to the sum of the "count" fields in buckets if a
 histogram is provided. */
    uint64_t count;
    /* sum of the values in the population. If count is zero then this field
 must be zero.

 Note: Sum should only be filled out when measuring non-negative discrete
 events, and is assumed to be monotonic over the values of these events.
 Negative events *can* be recorded, but sum should not be filled out when
 doing so. This is specifically to enforce compatibility w/ OpenMetrics,
 see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram */
    bool has_sum;
    double sum;
    /* bucket_counts is an optional field that contains the count values of the
 histogram for each bucket.

 The sum of the bucket_counts must equal the value in the count field.

 The number of elements in the bucket_counts array must be one greater than
 the number of elements in the explicit_bounds array. */
    pb_callback_t bucket_counts;
    /* explicit_bounds specifies buckets with explicitly defined bounds for values.

 The boundaries for bucket at index i are:

   (-infinity, explicit_bounds[i]] for i == 0
   (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
   (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)

 The values in the explicit_bounds array must be strictly increasing.

 Histogram buckets are inclusive of their upper boundary, except the last
 bucket where the boundary is at infinity. This format is intentionally
 compatible with the OpenMetrics histogram definition. */
    pb_callback_t explicit_bounds;
    /* (Optional) List of exemplars collected from
 measurements that were used to form the data point. */
    pb_callback_t exemplars;
    /* The set of key/value pairs that uniquely identify the timeseries from
 where this point belongs. The list may be empty (may contain 0 elements).
 Attribute keys MUST be unique (it is not allowed to have more than one
 attribute with the same key). */
    pb_callback_t attributes;
    /* Flags that apply to this specific data point. See DataPointFlags
 for the available flags and their meaning. */
    uint32_t flags;
    /* min is the minimum value over (start_time, end_time]. */
    bool has_min;
    double min;
    /* max is the maximum value over (start_time, end_time]. */
    bool has_max;
    double max;
} opentelemetry_proto_metrics_v1_HistogramDataPoint;
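/* Editor's note: a sketch (not part of the generated header) of the bucket
 * rule documented above for explicit_bounds: bucket i covers
 * (explicit_bounds[i-1], explicit_bounds[i]], and the final bucket is open
 * toward +infinity. Hypothetical helper; assumes the bounds are strictly
 * increasing, as required above. */
#if 0
static size_t histogram_bucket_index(const double *bounds, size_t n_bounds, double value)
{
    size_t i;
    for (i = 0; i < n_bounds; i++) {
        if (value <= bounds[i])
            return i; /* (-infinity, bounds[0]] or (bounds[i-1], bounds[i]] */
    }
    return n_bounds; /* (bounds[n_bounds-1], +infinity) */
}
#endif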

/* Buckets are a set of bucket counts, encoded in a contiguous array
 of counts. */
typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets {
    /* Offset is the bucket index of the first entry in the bucket_counts array.

 Note: This uses a varint encoding as a simple form of compression. */
    int32_t offset;
    /* bucket_counts is an array of count values, where bucket_counts[i] carries
 the count of the bucket at index (offset+i). bucket_counts[i] is the count
 of values greater than base^(offset+i) and less than or equal to
 base^(offset+i+1).

 Note: By contrast, the explicit HistogramDataPoint uses
 fixed64. This field is expected to have many buckets,
 especially zeros, so uint64 has been selected to ensure
 varint encoding. */
    pb_callback_t bucket_counts;
} opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets;

/* ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
 time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
 summary statistics for a population of values, it may optionally contain the
 distribution of those values across a set of buckets. */
typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint {
    /* The set of key/value pairs that uniquely identify the timeseries from
 where this point belongs. The list may be empty (may contain 0 elements).
 Attribute keys MUST be unique (it is not allowed to have more than one
 attribute with the same key). */
    pb_callback_t attributes;
    /* StartTimeUnixNano is optional but strongly encouraged, see
 the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t start_time_unix_nano;
    /* TimeUnixNano is required, see the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    /* count is the number of values in the population. Must be
 non-negative. This value must be equal to the sum of the "bucket_counts"
 values in the positive and negative Buckets plus the "zero_count" field. */
    uint64_t count;
    /* sum of the values in the population. If count is zero then this field
 must be zero.

 Note: Sum should only be filled out when measuring non-negative discrete
 events, and is assumed to be monotonic over the values of these events.
 Negative events *can* be recorded, but sum should not be filled out when
 doing so. This is specifically to enforce compatibility w/ OpenMetrics,
 see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram */
    bool has_sum;
    double sum;
    /* scale describes the resolution of the histogram. Boundaries are
 located at powers of the base, where:

   base = (2^(2^-scale))

 The histogram bucket identified by `index`, a signed integer,
 contains values that are greater than (base^index) and
 less than or equal to (base^(index+1)).

 The positive and negative ranges of the histogram are expressed
 separately. Negative values are mapped by their absolute value
 into the negative range using the same scale as the positive range.

 scale is not restricted by the protocol, as the permissible
 values depend on the range of the data. */
    int32_t scale;
    /* zero_count is the count of values that are either exactly zero or
 within the region considered zero by the instrumentation at the
 tolerated degree of precision. This bucket stores values that
 cannot be expressed using the standard exponential formula as
 well as values that have been rounded to zero.

 Implementations MAY consider the zero bucket to have probability
 mass equal to (zero_count / count). */
    uint64_t zero_count;
    /* positive carries the positive range of exponential bucket counts. */
    bool has_positive;
    opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets positive;
    /* negative carries the negative range of exponential bucket counts. */
    bool has_negative;
    opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets negative;
    /* Flags that apply to this specific data point. See DataPointFlags
 for the available flags and their meaning. */
    uint32_t flags;
    /* (Optional) List of exemplars collected from
 measurements that were used to form the data point. */
    pb_callback_t exemplars;
    /* min is the minimum value over (start_time, end_time]. */
    bool has_min;
    double min;
    /* max is the maximum value over (start_time, end_time]. */
    bool has_max;
    double max;
    /* ZeroThreshold may be optionally set to convey the width of the zero
 region, where the zero region is defined as the closed interval
 [-ZeroThreshold, ZeroThreshold].
 When ZeroThreshold is 0, the zero count bucket stores values that cannot be
 expressed using the standard exponential formula as well as values that
 have been rounded to zero. */
    double zero_threshold;
} opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint;
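/* Editor's note: a sketch (not part of the generated header) of the scale rule
 * documented above: base = 2^(2^-scale), and bucket `index` holds values in
 * (base^index, base^(index+1)]. Hypothetical helper; assumes value > 0 (zero
 * and near-zero values belong in zero_count instead). */
#if 0
#include <math.h>
static int32_t exp_histogram_index(int32_t scale, double value)
{
    double base = pow(2.0, pow(2.0, (double)-scale));
    /* Smallest index such that value <= base^(index+1). */
    return (int32_t)ceil(log(value) / log(base)) - 1;
}
#endif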

/* SummaryDataPoint is a single data point in a timeseries that describes the
 time-varying values of a Summary metric. */
typedef struct _opentelemetry_proto_metrics_v1_SummaryDataPoint {
    /* StartTimeUnixNano is optional but strongly encouraged, see
 the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t start_time_unix_nano;
    /* TimeUnixNano is required, see the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    /* count is the number of values in the population. Must be non-negative. */
    uint64_t count;
    /* sum of the values in the population. If count is zero then this field
 must be zero.

 Note: Sum should only be filled out when measuring non-negative discrete
 events, and is assumed to be monotonic over the values of these events.
 Negative events *can* be recorded, but sum should not be filled out when
 doing so. This is specifically to enforce compatibility w/ OpenMetrics,
 see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary */
    double sum;
    /* (Optional) list of values at different quantiles of the distribution calculated
 from the current snapshot. The quantiles must be strictly increasing. */
    pb_callback_t quantile_values;
    /* The set of key/value pairs that uniquely identify the timeseries from
 where this point belongs. The list may be empty (may contain 0 elements).
 Attribute keys MUST be unique (it is not allowed to have more than one
 attribute with the same key). */
    pb_callback_t attributes;
    /* Flags that apply to this specific data point. See DataPointFlags
 for the available flags and their meaning. */
    uint32_t flags;
} opentelemetry_proto_metrics_v1_SummaryDataPoint;

/* Represents the value at a given quantile of a distribution.

 To record Min and Max values, the following conventions are used:
 - The 1.0 quantile is equivalent to the maximum value observed.
 - The 0.0 quantile is equivalent to the minimum value observed.

 See the following issue for more context:
 https://github.com/open-telemetry/opentelemetry-proto/issues/125 */
typedef struct _opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile {
    /* The quantile of a distribution. Must be in the interval
 [0.0, 1.0]. */
    double quantile;
    /* The value at the given quantile of a distribution.

 Quantile values must NOT be negative. */
    double value;
} opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile;

/* A representation of an exemplar, which is a sample input measurement.
 Exemplars also hold information about the environment when the measurement
 was recorded, for example the span and trace ID of the active span when the
 exemplar was recorded. */
typedef struct _opentelemetry_proto_metrics_v1_Exemplar {
    /* time_unix_nano is the exact time when this exemplar was recorded.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    pb_size_t which_value;
    union {
        double as_double;
        int64_t as_int;
    } value;
    /* (Optional) Span ID of the exemplar trace.
 span_id may be missing if the measurement is not recorded inside a trace
 or if the trace is not sampled. */
    pb_callback_t span_id;
    /* (Optional) Trace ID of the exemplar trace.
 trace_id may be missing if the measurement is not recorded inside a trace
 or if the trace is not sampled. */
    pb_callback_t trace_id;
    /* The set of key/value pairs that were filtered out by the aggregator, but
 recorded alongside the original measurement. Only key/value pairs that were
 filtered out by the aggregator should be included. */
    pb_callback_t filtered_attributes;
} opentelemetry_proto_metrics_v1_Exemplar;


#ifdef __cplusplus
extern "C" {
#endif

/* Helper constants for enums */
#define _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
#define _opentelemetry_proto_metrics_v1_AggregationTemporality_MAX opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
#define _opentelemetry_proto_metrics_v1_AggregationTemporality_ARRAYSIZE ((opentelemetry_proto_metrics_v1_AggregationTemporality)(opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE+1))

#define _opentelemetry_proto_metrics_v1_DataPointFlags_MIN opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE
#define _opentelemetry_proto_metrics_v1_DataPointFlags_MAX opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
#define _opentelemetry_proto_metrics_v1_DataPointFlags_ARRAYSIZE ((opentelemetry_proto_metrics_v1_DataPointFlags)(opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK+1))

#define opentelemetry_proto_metrics_v1_Sum_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality

#define opentelemetry_proto_metrics_v1_Histogram_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality

#define opentelemetry_proto_metrics_v1_ExponentialHistogram_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality


/* Initializer values for message structs */
#define opentelemetry_proto_metrics_v1_MetricsData_init_default {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_ResourceMetrics_init_default {false, opentelemetry_proto_resource_v1_Resource_init_default, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_ScopeMetrics_init_default {false, opentelemetry_proto_common_v1_InstrumentationScope_init_default, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_Metric_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, {opentelemetry_proto_metrics_v1_Gauge_init_default}}
#define opentelemetry_proto_metrics_v1_Gauge_init_default {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_Sum_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN, 0}
#define opentelemetry_proto_metrics_v1_Histogram_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
#define opentelemetry_proto_metrics_v1_Summary_init_default {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_NumberDataPoint_init_default {0, 0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, 0}
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_init_default {0, 0, 0, false, 0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, false, 0, false, 0}
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_init_default {{{NULL}, NULL}, 0, 0, 0, false, 0, 0, 0, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default, 0, {{NULL}, NULL}, false, 0, false, 0, 0}
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default {0, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_init_default {0, 0, 0, 0, {{NULL}, NULL}, {{NULL}, NULL}, 0}
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_init_default {0, 0}
#define opentelemetry_proto_metrics_v1_Exemplar_init_default {0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_MetricsData_init_zero {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_ResourceMetrics_init_zero {false, opentelemetry_proto_resource_v1_Resource_init_zero, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_ScopeMetrics_init_zero {false, opentelemetry_proto_common_v1_InstrumentationScope_init_zero, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_Metric_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, {opentelemetry_proto_metrics_v1_Gauge_init_zero}}
#define opentelemetry_proto_metrics_v1_Gauge_init_zero {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_Sum_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN, 0}
#define opentelemetry_proto_metrics_v1_Histogram_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
#define opentelemetry_proto_metrics_v1_Summary_init_zero {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_NumberDataPoint_init_zero {0, 0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, 0}
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_init_zero {0, 0, 0, false, 0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, false, 0, false, 0}
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_init_zero {{{NULL}, NULL}, 0, 0, 0, false, 0, 0, 0, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero, 0, {{NULL}, NULL}, false, 0, false, 0, 0}
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero {0, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_init_zero {0, 0, 0, 0, {{NULL}, NULL}, {{NULL}, NULL}, 0}
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_init_zero {0, 0}
#define opentelemetry_proto_metrics_v1_Exemplar_init_zero {0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}}

/* Field tags (for use in manual encoding/decoding) */
#define opentelemetry_proto_metrics_v1_MetricsData_resource_metrics_tag 1
#define opentelemetry_proto_metrics_v1_ResourceMetrics_resource_tag 1
#define opentelemetry_proto_metrics_v1_ResourceMetrics_scope_metrics_tag 2
#define opentelemetry_proto_metrics_v1_ResourceMetrics_schema_url_tag 3
#define opentelemetry_proto_metrics_v1_ScopeMetrics_scope_tag 1
#define opentelemetry_proto_metrics_v1_ScopeMetrics_metrics_tag 2
#define opentelemetry_proto_metrics_v1_ScopeMetrics_schema_url_tag 3
#define opentelemetry_proto_metrics_v1_Gauge_data_points_tag 1
#define opentelemetry_proto_metrics_v1_Sum_data_points_tag 1
#define opentelemetry_proto_metrics_v1_Sum_aggregation_temporality_tag 2
#define opentelemetry_proto_metrics_v1_Sum_is_monotonic_tag 3
#define opentelemetry_proto_metrics_v1_Histogram_data_points_tag 1
#define opentelemetry_proto_metrics_v1_Histogram_aggregation_temporality_tag 2
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_data_points_tag 1
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_aggregation_temporality_tag 2
#define opentelemetry_proto_metrics_v1_Summary_data_points_tag 1
#define opentelemetry_proto_metrics_v1_Metric_name_tag 1
#define opentelemetry_proto_metrics_v1_Metric_description_tag 2
#define opentelemetry_proto_metrics_v1_Metric_unit_tag 3
#define opentelemetry_proto_metrics_v1_Metric_gauge_tag 5
#define opentelemetry_proto_metrics_v1_Metric_sum_tag 7
#define opentelemetry_proto_metrics_v1_Metric_histogram_tag 9
#define opentelemetry_proto_metrics_v1_Metric_exponential_histogram_tag 10
#define opentelemetry_proto_metrics_v1_Metric_summary_tag 11
#define opentelemetry_proto_metrics_v1_NumberDataPoint_start_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_NumberDataPoint_time_unix_nano_tag 3
#define opentelemetry_proto_metrics_v1_NumberDataPoint_as_double_tag 4
#define opentelemetry_proto_metrics_v1_NumberDataPoint_as_int_tag 6
#define opentelemetry_proto_metrics_v1_NumberDataPoint_exemplars_tag 5
#define opentelemetry_proto_metrics_v1_NumberDataPoint_attributes_tag 7
#define opentelemetry_proto_metrics_v1_NumberDataPoint_flags_tag 8
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_start_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_time_unix_nano_tag 3
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_count_tag 4
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_sum_tag 5
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_bucket_counts_tag 6
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_explicit_bounds_tag 7
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_exemplars_tag 8
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_attributes_tag 9
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_flags_tag 10
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_min_tag 11
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_max_tag 12
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_offset_tag 1
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_bucket_counts_tag 2
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_attributes_tag 1
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_start_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_time_unix_nano_tag 3
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_count_tag 4
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_sum_tag 5
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_scale_tag 6
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_zero_count_tag 7
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_positive_tag 8
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_negative_tag 9
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_flags_tag 10
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_exemplars_tag 11
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_min_tag 12
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_max_tag 13
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_zero_threshold_tag 14
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_start_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_time_unix_nano_tag 3
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_count_tag 4
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_sum_tag 5
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_quantile_values_tag 6
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_attributes_tag 7
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_flags_tag 8
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_quantile_tag 1
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_value_tag 2
#define opentelemetry_proto_metrics_v1_Exemplar_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_Exemplar_as_double_tag 3
#define opentelemetry_proto_metrics_v1_Exemplar_as_int_tag 6
#define opentelemetry_proto_metrics_v1_Exemplar_span_id_tag 4
#define opentelemetry_proto_metrics_v1_Exemplar_trace_id_tag 5
#define opentelemetry_proto_metrics_v1_Exemplar_filtered_attributes_tag 7

/* Struct field encoding specification for nanopb */
#define opentelemetry_proto_metrics_v1_MetricsData_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, resource_metrics, 1)
#define opentelemetry_proto_metrics_v1_MetricsData_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_MetricsData_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_MetricsData_resource_metrics_MSGTYPE opentelemetry_proto_metrics_v1_ResourceMetrics
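/* Editor's note: CALLBACK fields in the FIELDLISTs here are encoded through
 * pb_callback_t rather than static storage. A sketch (not part of the
 * generated header) of an encode callback for a single `resource_metrics`
 * entry, following the callback shape shown in the nanopb documentation;
 * `encode_resource_metrics` is a hypothetical name. */
#if 0
static bool encode_resource_metrics(pb_ostream_t *stream, const pb_field_t *field, void * const *arg)
{
    const opentelemetry_proto_metrics_v1_ResourceMetrics *rm = *arg;
    if (!pb_encode_tag_for_field(stream, field)) /* writes the field tag (1) and wire type */
        return false;
    return pb_encode_submessage(stream, opentelemetry_proto_metrics_v1_ResourceMetrics_fields, rm);
}
#endif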

#define opentelemetry_proto_metrics_v1_ResourceMetrics_FIELDLIST(X, a) \
X(a, STATIC, OPTIONAL, MESSAGE, resource, 1) \
X(a, CALLBACK, REPEATED, MESSAGE, scope_metrics, 2) \
X(a, CALLBACK, SINGULAR, STRING, schema_url, 3)
#define opentelemetry_proto_metrics_v1_ResourceMetrics_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ResourceMetrics_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_ResourceMetrics_resource_MSGTYPE opentelemetry_proto_resource_v1_Resource
#define opentelemetry_proto_metrics_v1_ResourceMetrics_scope_metrics_MSGTYPE opentelemetry_proto_metrics_v1_ScopeMetrics

#define opentelemetry_proto_metrics_v1_ScopeMetrics_FIELDLIST(X, a) \
X(a, STATIC, OPTIONAL, MESSAGE, scope, 1) \
X(a, CALLBACK, REPEATED, MESSAGE, metrics, 2) \
X(a, CALLBACK, SINGULAR, STRING, schema_url, 3)
#define opentelemetry_proto_metrics_v1_ScopeMetrics_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ScopeMetrics_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_ScopeMetrics_scope_MSGTYPE opentelemetry_proto_common_v1_InstrumentationScope
#define opentelemetry_proto_metrics_v1_ScopeMetrics_metrics_MSGTYPE opentelemetry_proto_metrics_v1_Metric

#define opentelemetry_proto_metrics_v1_Metric_FIELDLIST(X, a) \
X(a, CALLBACK, SINGULAR, STRING, name, 1) \
X(a, CALLBACK, SINGULAR, STRING, description, 2) \
X(a, CALLBACK, SINGULAR, STRING, unit, 3) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,gauge,data.gauge), 5) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,sum,data.sum), 7) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,histogram,data.histogram), 9) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,exponential_histogram,data.exponential_histogram), 10) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,summary,data.summary), 11)
#define opentelemetry_proto_metrics_v1_Metric_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Metric_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Metric_data_gauge_MSGTYPE opentelemetry_proto_metrics_v1_Gauge
#define opentelemetry_proto_metrics_v1_Metric_data_sum_MSGTYPE opentelemetry_proto_metrics_v1_Sum
#define opentelemetry_proto_metrics_v1_Metric_data_histogram_MSGTYPE opentelemetry_proto_metrics_v1_Histogram
#define opentelemetry_proto_metrics_v1_Metric_data_exponential_histogram_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogram
#define opentelemetry_proto_metrics_v1_Metric_data_summary_MSGTYPE opentelemetry_proto_metrics_v1_Summary

#define opentelemetry_proto_metrics_v1_Gauge_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1)
#define opentelemetry_proto_metrics_v1_Gauge_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Gauge_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Gauge_data_points_MSGTYPE opentelemetry_proto_metrics_v1_NumberDataPoint

#define opentelemetry_proto_metrics_v1_Sum_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \
X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2) \
X(a, STATIC, SINGULAR, BOOL, is_monotonic, 3)
#define opentelemetry_proto_metrics_v1_Sum_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Sum_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Sum_data_points_MSGTYPE opentelemetry_proto_metrics_v1_NumberDataPoint

#define opentelemetry_proto_metrics_v1_Histogram_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \
X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2)
#define opentelemetry_proto_metrics_v1_Histogram_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Histogram_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Histogram_data_points_MSGTYPE opentelemetry_proto_metrics_v1_HistogramDataPoint

#define opentelemetry_proto_metrics_v1_ExponentialHistogram_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \
X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2)
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_data_points_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint

#define opentelemetry_proto_metrics_v1_Summary_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1)
#define opentelemetry_proto_metrics_v1_Summary_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Summary_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Summary_data_points_MSGTYPE opentelemetry_proto_metrics_v1_SummaryDataPoint

#define opentelemetry_proto_metrics_v1_NumberDataPoint_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \
X(a, STATIC, ONEOF, DOUBLE, (value,as_double,value.as_double), 4) \
X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 5) \
X(a, STATIC, ONEOF, SFIXED64, (value,as_int,value.as_int), 6) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 7) \
X(a, STATIC, SINGULAR, UINT32, flags, 8)
#define opentelemetry_proto_metrics_v1_NumberDataPoint_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_NumberDataPoint_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_NumberDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar
#define opentelemetry_proto_metrics_v1_NumberDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

#define opentelemetry_proto_metrics_v1_HistogramDataPoint_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \
X(a, STATIC, SINGULAR, FIXED64, count, 4) \
X(a, STATIC, OPTIONAL, DOUBLE, sum, 5) \
X(a, CALLBACK, REPEATED, FIXED64, bucket_counts, 6) \
X(a, CALLBACK, REPEATED, DOUBLE, explicit_bounds, 7) \
X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 8) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 9) \
X(a, STATIC, SINGULAR, UINT32, flags, 10) \
X(a, STATIC, OPTIONAL, DOUBLE, min, 11) \
X(a, STATIC, OPTIONAL, DOUBLE, max, 12)
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 1) \
X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \
X(a, STATIC, SINGULAR, FIXED64, count, 4) \
X(a, STATIC, OPTIONAL, DOUBLE, sum, 5) \
X(a, STATIC, SINGULAR, SINT32, scale, 6) \
X(a, STATIC, SINGULAR, FIXED64, zero_count, 7) \
X(a, STATIC, OPTIONAL, MESSAGE, positive, 8) \
X(a, STATIC, OPTIONAL, MESSAGE, negative, 9) \
X(a, STATIC, SINGULAR, UINT32, flags, 10) \
X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 11) \
X(a, STATIC, OPTIONAL, DOUBLE, min, 12) \
X(a, STATIC, OPTIONAL, DOUBLE, max, 13) \
X(a, STATIC, SINGULAR, DOUBLE, zero_threshold, 14)
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_positive_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_negative_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar

#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, SINT32, offset, 1) \
X(a, CALLBACK, REPEATED, UINT64, bucket_counts, 2)
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_DEFAULT NULL

#define opentelemetry_proto_metrics_v1_SummaryDataPoint_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \
X(a, STATIC, SINGULAR, FIXED64, count, 4) \
X(a, STATIC, SINGULAR, DOUBLE, sum, 5) \
X(a, CALLBACK, REPEATED, MESSAGE, quantile_values, 6) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 7) \
X(a, STATIC, SINGULAR, UINT32, flags, 8)
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_quantile_values_MSGTYPE opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, DOUBLE, quantile, 1) \
X(a, STATIC, SINGULAR, DOUBLE, value, 2)
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_CALLBACK NULL
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_DEFAULT NULL

#define opentelemetry_proto_metrics_v1_Exemplar_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 2) \
X(a, STATIC, ONEOF, DOUBLE, (value,as_double,value.as_double), 3) \
X(a, CALLBACK, SINGULAR, BYTES, span_id, 4) \
X(a, CALLBACK, SINGULAR, BYTES, trace_id, 5) \
X(a, STATIC, ONEOF, SFIXED64, (value,as_int,value.as_int), 6) \
X(a, CALLBACK, REPEATED, MESSAGE, filtered_attributes, 7)
#define opentelemetry_proto_metrics_v1_Exemplar_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Exemplar_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Exemplar_filtered_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_MetricsData_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ResourceMetrics_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ScopeMetrics_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Metric_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Gauge_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Sum_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Histogram_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogram_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Summary_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_NumberDataPoint_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_HistogramDataPoint_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_SummaryDataPoint_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Exemplar_msg;

/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
#define opentelemetry_proto_metrics_v1_MetricsData_fields &opentelemetry_proto_metrics_v1_MetricsData_msg
#define opentelemetry_proto_metrics_v1_ResourceMetrics_fields &opentelemetry_proto_metrics_v1_ResourceMetrics_msg
#define opentelemetry_proto_metrics_v1_ScopeMetrics_fields &opentelemetry_proto_metrics_v1_ScopeMetrics_msg
#define opentelemetry_proto_metrics_v1_Metric_fields &opentelemetry_proto_metrics_v1_Metric_msg
#define opentelemetry_proto_metrics_v1_Gauge_fields &opentelemetry_proto_metrics_v1_Gauge_msg
#define opentelemetry_proto_metrics_v1_Sum_fields &opentelemetry_proto_metrics_v1_Sum_msg
#define opentelemetry_proto_metrics_v1_Histogram_fields &opentelemetry_proto_metrics_v1_Histogram_msg
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_fields &opentelemetry_proto_metrics_v1_ExponentialHistogram_msg
#define opentelemetry_proto_metrics_v1_Summary_fields &opentelemetry_proto_metrics_v1_Summary_msg
#define opentelemetry_proto_metrics_v1_NumberDataPoint_fields &opentelemetry_proto_metrics_v1_NumberDataPoint_msg
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_fields &opentelemetry_proto_metrics_v1_HistogramDataPoint_msg
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_fields &opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_msg
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_fields &opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_msg
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_fields &opentelemetry_proto_metrics_v1_SummaryDataPoint_msg
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_fields &opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_msg
#define opentelemetry_proto_metrics_v1_Exemplar_fields &opentelemetry_proto_metrics_v1_Exemplar_msg

/* Maximum encoded size of messages (where known) */
/* opentelemetry_proto_metrics_v1_MetricsData_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_ResourceMetrics_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_ScopeMetrics_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_Metric_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_Gauge_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_Sum_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_Histogram_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_ExponentialHistogram_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_Summary_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_NumberDataPoint_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_HistogramDataPoint_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_SummaryDataPoint_size depends on runtime parameters */
/* opentelemetry_proto_metrics_v1_Exemplar_size depends on runtime parameters */
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_size 18
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
@ -0,0 +1,12 @@
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.4.8-dev */

#include "opentelemetry/resource.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

PB_BIND(opentelemetry_proto_resource_v1_Resource, opentelemetry_proto_resource_v1_Resource, AUTO)
@ -0,0 +1,58 @@
/* Automatically generated nanopb header */
/* Generated by nanopb-0.4.8-dev */

#ifndef PB_OPENTELEMETRY_PROTO_RESOURCE_V1_OPENTELEMETRY_PROTO_RESOURCE_V1_RESOURCE_PB_H_INCLUDED
#define PB_OPENTELEMETRY_PROTO_RESOURCE_V1_OPENTELEMETRY_PROTO_RESOURCE_V1_RESOURCE_PB_H_INCLUDED
#include <nanopb/pb.h>
#include "opentelemetry/common.pb.h"

#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

/* Struct definitions */
/* Resource information. */
typedef struct _opentelemetry_proto_resource_v1_Resource {
    /* Set of attributes that describe the resource.
       Attribute keys MUST be unique (it is not allowed to have more than one
       attribute with the same key). */
    pb_callback_t attributes;
    /* dropped_attributes_count is the number of dropped attributes. If the value is 0, then
       no attributes were dropped. */
    uint32_t dropped_attributes_count;
} opentelemetry_proto_resource_v1_Resource;


#ifdef __cplusplus
extern "C" {
#endif

/* Initializer values for message structs */
#define opentelemetry_proto_resource_v1_Resource_init_default {{{NULL}, NULL}, 0}
#define opentelemetry_proto_resource_v1_Resource_init_zero {{{NULL}, NULL}, 0}

/* Field tags (for use in manual encoding/decoding) */
#define opentelemetry_proto_resource_v1_Resource_attributes_tag 1
#define opentelemetry_proto_resource_v1_Resource_dropped_attributes_count_tag 2

/* Struct field encoding specification for nanopb */
#define opentelemetry_proto_resource_v1_Resource_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 1) \
X(a, STATIC, SINGULAR, UINT32, dropped_attributes_count, 2)
#define opentelemetry_proto_resource_v1_Resource_CALLBACK pb_default_field_callback
#define opentelemetry_proto_resource_v1_Resource_DEFAULT NULL
#define opentelemetry_proto_resource_v1_Resource_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

extern const pb_msgdesc_t opentelemetry_proto_resource_v1_Resource_msg;

/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
#define opentelemetry_proto_resource_v1_Resource_fields &opentelemetry_proto_resource_v1_Resource_msg

/* Maximum encoded size of messages (where known) */
/* opentelemetry_proto_resource_v1_Resource_size depends on runtime parameters */

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif
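For orientation, the following is a minimal sketch (not part of the diff) of how a nanopb-0.4 message such as the Resource above is serialized. The buffer size and field values are illustrative assumptions; the callback-typed attributes field is deliberately left unset so only the static field is encoded.

#include <stdio.h>
#include <nanopb/pb_encode.h>
#include "opentelemetry/resource.pb.h"

int main(void) {
        /* Zero-initialized message via the generated _init_zero define. */
        opentelemetry_proto_resource_v1_Resource res =
            opentelemetry_proto_resource_v1_Resource_init_zero;
        uint8_t buf[64]; /* illustrative size */
        pb_ostream_t stream = pb_ostream_from_buffer(buf, sizeof(buf));

        res.dropped_attributes_count = 3;

        /* The backwards-compat _fields define expands to &..._msg. */
        if (!pb_encode(&stream,
                       opentelemetry_proto_resource_v1_Resource_fields,
                       &res)) {
                fprintf(stderr, "encode failed: %s\n", PB_GET_ERROR(&stream));
                return 1;
        }
        printf("encoded %zu bytes\n", stream.bytes_written);
        return 0;
}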
@ -1,7 +1,8 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023, Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -52,6 +53,7 @@
#include <time.h>
#include <assert.h>
#include <limits.h>
#include <sys/stat.h>

#include "tinycthread.h"
#include "rdsysqueue.h"
@ -219,7 +221,7 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) {


/* Round/align X upwards to STRIDE, which must be power of 2. */
#define RD_ROUNDUP(X, STRIDE) (((X) + ((STRIDE)-1)) & ~(STRIDE - 1))
#define RD_ROUNDUP(X, STRIDE) (((X) + ((STRIDE) - 1)) & ~(STRIDE - 1))

#define RD_ARRAY_SIZE(A) (sizeof((A)) / sizeof(*(A)))
#define RD_ARRAYSIZE(A) RD_ARRAY_SIZE(A)
@ -424,6 +426,10 @@ static RD_INLINE RD_UNUSED int rd_refcnt_get(rd_refcnt_t *R) {
        } while (0)


#define RD_INTERFACE_CALL(i, name, ...) (i->name(i->opaque, __VA_ARGS__))

#define RD_CEIL_INTEGER_DIVISION(X, DEN) (((X) + ((DEN) - 1)) / (DEN))

/**
 * @brief Utility types to hold memory,size tuple.
 */
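A quick worked check of the new ceiling-division macro (not part of the diff; values are illustrative): it rounds the integer quotient upward instead of truncating.

#include <assert.h>
#include "rd.h"

/* Illustrative sanity checks for RD_CEIL_INTEGER_DIVISION. */
static void ceil_division_demo(void) {
        assert(RD_CEIL_INTEGER_DIVISION(10, 4) == 3); /* 10/4 = 2.5 -> 3 */
        assert(RD_CEIL_INTEGER_DIVISION(8, 4) == 2);  /* exact -> 2 */
        assert(RD_CEIL_INTEGER_DIVISION(1, 4) == 1);  /* 0.25 -> 1 */
}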
@ -433,4 +439,140 @@ typedef struct rd_chariov_s {
        size_t size;
} rd_chariov_t;

/**
 * @brief Read the file at \p file_path in binary mode and return its contents.
 *        The returned buffer is NULL-terminated,
 *        the size parameter will contain the actual file size.
 *
 * @param file_path Path to the file to read.
 * @param size Pointer to store the file size (optional).
 * @param max_size Maximum file size to read (0 for no limit) (optional).
 *
 * @returns Newly allocated buffer containing the file contents.
 *          NULL on error (file not found, too large, etc).
 *
 * @remark The returned pointer ownership is transferred to the caller.
 *
 * @locality Any thread
 */
static RD_INLINE RD_UNUSED char *
rd_file_read(const char *file_path, size_t *size, size_t max_size) {
        FILE *file;
        char *buf = NULL;
        size_t file_size;
        size_t read_size;
        if (!size)
                size = &read_size;

#ifndef _WIN32
        file = fopen(file_path, "rb");
#else
        file  = NULL;
        errno = fopen_s(&file, file_path, "rb");
#endif
        if (!file)
                return NULL;

        if (fseek(file, 0, SEEK_END) != 0)
                goto err;

        file_size = (size_t)ftell(file);
        if (file_size < 0)
                goto err;

        if (fseek(file, 0, SEEK_SET) != 0)
                goto err;

        /* Check if file is too large */
        if (max_size > 0 && file_size > max_size)
                goto err;

        /* Allocate buffer with extra byte for NULL terminator */
        buf       = (char *)rd_malloc(file_size + 1);
        read_size = fread(buf, 1, file_size, file);

        if (read_size != file_size)
                goto err;

        /* NULL terminate the buffer */
        buf[file_size] = '\0';
        *size          = file_size;
        fclose(file);
        return buf;
err:
        fclose(file);
        if (buf)
                rd_free(buf);
        return NULL;
}

static RD_INLINE RD_UNUSED FILE *
rd_file_mkstemp(const char *prefix,
                const char *mode,
                char *tempfile_path_out,
                size_t tempfile_path_out_size) {
        FILE *tempfile;

#ifdef _WIN32
        char tempfolder_path[MAX_PATH];
        char tempfile_path[MAX_PATH];
        if (!GetTempPathA(MAX_PATH, tempfolder_path))
                return NULL; /* Failed to get temp folder path */

        if (!GetTempFileNameA(tempfolder_path, "TMP", 1, tempfile_path))
                return NULL; /* Failed to create temp file name */

        tempfile = fopen(tempfile_path, mode);
#else
        int tempfile_fd;
        char tempfile_path[512];
        rd_snprintf(tempfile_path, sizeof(tempfile_path), "/tmp/%sXXXXXX",
                    prefix);
        tempfile_fd = mkstemp(tempfile_path);
        if (tempfile_fd < 0)
                return NULL;

        tempfile = fdopen(tempfile_fd, mode);
#endif

        if (!tempfile)
                return NULL;

        if (tempfile_path_out)
                rd_snprintf(tempfile_path_out, tempfile_path_out_size, "%s",
                            tempfile_path);
        return tempfile;
}

/**
 * @brief Retrieve stat for a \p path .
 *
 * @param path Path to the file or directory.
 * @param is_dir Pointer to store if the \p path is a directory (optional).
 *
 * @return `rd_true` if the path exists.
 */
static RD_INLINE RD_UNUSED rd_bool_t rd_file_stat(const char *path,
                                                  rd_bool_t *is_dir) {
#ifdef _WIN32
        struct _stat st;
        if (_stat(path, &st) == 0) {
                if (is_dir)
                        *is_dir = st.st_mode & S_IFDIR;
                return rd_true;
        }
#else
        struct stat st;
        if (stat(path, &st) == 0) {
                if (is_dir)
                        *is_dir = S_ISDIR(st.st_mode);
                return rd_true;
        }
#endif
        if (is_dir)
                *is_dir = rd_false;
        return rd_false;
}

#endif /* _RD_H_ */
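A minimal usage sketch of the new file helpers above (the path and the 1 MB size cap are illustrative assumptions, not part of the diff):

#include <stdio.h>
#include "rd.h"

static void dump_file_info(const char *path) {
        rd_bool_t is_dir;
        size_t size;
        char *contents;

        if (!rd_file_stat(path, &is_dir) || is_dir)
                return; /* Missing, or a directory: nothing to read */

        /* Read at most 1 MB; returns a NUL-terminated heap buffer. */
        contents = rd_file_read(path, &size, 1024 * 1024);
        if (!contents)
                return;

        printf("%s: %zu bytes\n", path, size);
        rd_free(contents); /* Ownership was transferred to the caller */
}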
@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -54,14 +54,13 @@ typedef union {
#define sinx_family in.sin_family
#define sinx_addr   in.sin_addr
#define RD_SOCKADDR_INX_LEN(sinx) \
        ((sinx)->sinx_family == AF_INET \
             ? sizeof(struct sockaddr_in) \
        ((sinx)->sinx_family == AF_INET    ? sizeof(struct sockaddr_in) \
         : (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6) \
                                           : sizeof(rd_sockaddr_inx_t))
#define RD_SOCKADDR_INX_PORT(sinx) \
        ((sinx)->sinx_family == AF_INET \
             ? (sinx)->in.sin_port \
         : (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0)
        ((sinx)->sinx_family == AF_INET    ? (sinx)->in.sin_port \
         : (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port \
                                           : 0)

#define RD_SOCKADDR_INX_PORT_SET(sinx, port) \
        do { \
@ -139,7 +138,7 @@ rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) {

#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \
        for ((sinx) = &(rsal)->rsal_addr[0]; \
             (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_len]; (sinx)++)
             (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_cnt]; (sinx)++)

/**
 * Wrapper for getaddrinfo(3) that performs these additional tasks:
@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2014-2016 Magnus Edenhill
 * Copyright (c) 2014-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018 Magnus Edenhill
 * Copyright (c) 2018-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -45,7 +45,8 @@ typedef struct rd_avg_s {
        } ra_v;
        mtx_t ra_lock;
        int ra_enabled;
        enum { RD_AVG_GAUGE,
        enum {
                RD_AVG_GAUGE,
                RD_AVG_COUNTER,
        } ra_type;
#if WITH_HDRHISTOGRAM
@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012-2016, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012-2016, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -0,0 +1,200 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2023 Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "rdbase64.h"

#if WITH_SSL
#include <openssl/ssl.h>
#else

#define conv_bin2ascii(a, table) ((table)[(a) & 0x3f])

static const unsigned char data_bin2ascii[65] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

static int base64_encoding_conversion(unsigned char *out,
                                      const unsigned char *in,
                                      int dlen) {
        int i, ret = 0;
        unsigned long l;

        for (i = dlen; i > 0; i -= 3) {
                if (i >= 3) {
                        l = (((unsigned long)in[0]) << 16L) |
                            (((unsigned long)in[1]) << 8L) | in[2];
                        *(out++) = conv_bin2ascii(l >> 18L, data_bin2ascii);
                        *(out++) = conv_bin2ascii(l >> 12L, data_bin2ascii);
                        *(out++) = conv_bin2ascii(l >> 6L, data_bin2ascii);
                        *(out++) = conv_bin2ascii(l, data_bin2ascii);
                } else {
                        l = ((unsigned long)in[0]) << 16L;
                        if (i == 2)
                                l |= ((unsigned long)in[1] << 8L);

                        *(out++) = conv_bin2ascii(l >> 18L, data_bin2ascii);
                        *(out++) = conv_bin2ascii(l >> 12L, data_bin2ascii);
                        *(out++) =
                            (i == 1) ? '='
                                     : conv_bin2ascii(l >> 6L, data_bin2ascii);
                        *(out++) = '=';
                }
                ret += 4;
                in += 3;
        }

        *out = '\0';
        return ret;
}

#endif

/**
 * @brief Base64 encode binary input \p in, and write the base64-encoded string
 *        and its size to \p out. out->ptr will be NULL in case of some issue
 *        with the conversion, or if the conversion is not supported.
 *
 * @remark out->ptr must be freed after use.
 */
void rd_base64_encode(const rd_chariov_t *in, rd_chariov_t *out) {

        size_t max_len;

        /* OpenSSL takes an |int| argument so the input cannot exceed that. */
        if (in->size > INT_MAX) {
                out->ptr = NULL;
                return;
        }

        max_len  = (((in->size + 2) / 3) * 4) + 1;
        out->ptr = rd_malloc(max_len);

#if WITH_SSL
        out->size = EVP_EncodeBlock((unsigned char *)out->ptr,
                                    (unsigned char *)in->ptr, (int)in->size);
#else
        out->size = base64_encoding_conversion(
            (unsigned char *)out->ptr, (unsigned char *)in->ptr, (int)in->size);
#endif

        rd_assert(out->size < max_len);
        out->ptr[out->size] = 0;
}


/**
 * @brief Base64 encode binary input \p in.
 * @returns a newly allocated, base64-encoded string or NULL in case of some
 *          issue with the conversion, or if the conversion is not supported.
 *
 * @remark Returned string must be freed after use.
 */
char *rd_base64_encode_str(const rd_chariov_t *in) {
        rd_chariov_t out;
        rd_base64_encode(in, &out);
        return out.ptr;
}

/**
 * @brief Base64 encode binary input \p in and return a newly allocated,
 *        base64-encoded string with URL-safe characters.
 * @returns a newly allocated, base64-encoded string or NULL in case of some
 *          issue with the conversion, or if the conversion is not supported.
 *
 * @remark Returned string must be freed after use.
 */
char *rd_base64_encode_str_urlsafe(const rd_chariov_t *in) {
        rd_chariov_t out;
        char *p;
        rd_base64_encode(in, &out);

        /* Replace + with - and / with _ */
        for (p = out.ptr; *p; p++) {
                if (*p == '+')
                        *p = '-';
                else if (*p == '/')
                        *p = '_';
        }

        /* Remove padding '=' characters */
        int newlen = strlen(out.ptr);
        while (newlen > 0 && out.ptr[newlen - 1] == '=') {
                out.ptr[newlen - 1] = '\0';
                newlen--;
        }

        out.size = newlen;
        return out.ptr;
}

/**
 * @brief Base64 decode input string \p in. Ignores leading and trailing
 *        whitespace.
 * @returns * 0 on success, in which case a newly allocated binary string is
 *            set in \p out (and size).
 *          * -1 on invalid Base64.
 *          * -2 on conversion not supported.
 */
int rd_base64_decode(const rd_chariov_t *in, rd_chariov_t *out) {

#if WITH_SSL
        size_t ret_len;

        /* OpenSSL takes an |int| argument, so |in->size| must not exceed
         * that. */
        if (in->size % 4 != 0 || in->size > INT_MAX) {
                return -1;
        }

        ret_len  = ((in->size / 4) * 3);
        out->ptr = rd_malloc(ret_len + 1);

        if (EVP_DecodeBlock((unsigned char *)out->ptr, (unsigned char *)in->ptr,
                            (int)in->size) == -1) {
                rd_free(out->ptr);
                out->ptr = NULL;
                return -1;
        }

        /* EVP_DecodeBlock will pad the output with trailing NULs and count
         * them in the return value. */
        if (in->size > 1 && in->ptr[in->size - 1] == '=') {
                if (in->size > 2 && in->ptr[in->size - 2] == '=') {
                        ret_len -= 2;
                } else {
                        ret_len -= 1;
                }
        }

        out->ptr[ret_len] = 0;
        out->size         = ret_len;

        return 0;
#else
        return -2;
#endif
}
@ -0,0 +1,43 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2023 Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _RDBASE64_H_
#define _RDBASE64_H_

#include "rd.h"

void rd_base64_encode(const rd_chariov_t *in, rd_chariov_t *out);

char *rd_base64_encode_str(const rd_chariov_t *in);

char *rd_base64_encode_str_urlsafe(const rd_chariov_t *in);

int rd_base64_decode(const rd_chariov_t *in, rd_chariov_t *out);

#endif /* _RDBASE64_H_ */
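A brief round-trip sketch of this API (not part of the diff; the payload string is an illustrative assumption):

#include <stdio.h>
#include <string.h>
#include "rdbase64.h"

static void base64_roundtrip_demo(void) {
        char payload[] = "hello, kafka";
        rd_chariov_t in = {.ptr = payload, .size = strlen(payload)};
        rd_chariov_t encoded, decoded;
        char *urlsafe;

        rd_base64_encode(&in, &encoded);
        if (!encoded.ptr)
                return; /* Conversion failed or not supported */
        printf("encoded: %s\n", encoded.ptr);

        /* URL-safe variant: '+' -> '-', '/' -> '_', padding stripped. */
        urlsafe = rd_base64_encode_str_urlsafe(&in);

        /* Decode returns 0 on success, -1 on bad input, -2 if unsupported. */
        if (rd_base64_decode(&encoded, &decoded) == 0) {
                printf("decoded: %.*s\n", (int)decoded.size, decoded.ptr);
                rd_free(decoded.ptr);
        }

        rd_free(encoded.ptr);
        rd_free(urlsafe);
}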
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2017 Magnus Edenhill
 * Copyright (c) 2017-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -660,14 +660,17 @@ size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size) {
                              segremains);

                seg->seg_of -= toerase;
                seg->seg_erased += toerase;
                rbuf->rbuf_len -= toerase;

                of += toerase;

                /* If segment is now empty, remove it */
                if (seg->seg_of == 0)
                if (seg->seg_of == 0) {
                        rbuf->rbuf_erased -= seg->seg_erased;
                        rd_buf_destroy_segment(rbuf, seg);
                }
        }

        /* Update absolute offset of remaining segments */
        for (seg = next; seg; seg = TAILQ_NEXT(seg, seg_link)) {
@ -709,6 +712,7 @@ int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof) {
             next != seg;) {
                rd_segment_t *this = next;
                next = TAILQ_PREV(this, rd_segment_head, seg_link);
                rbuf->rbuf_erased -= this->seg_erased;
                rd_buf_destroy_segment(rbuf, this);
        }
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2017 Magnus Edenhill
 * Copyright (c) 2017-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -70,6 +70,8 @@ typedef struct rd_segment_s {
                            * beginning in the grand rd_buf_t */
        void (*seg_free)(void *p); /**< Optional free function for seg_p */
        int seg_flags;             /**< Segment flags */
        size_t seg_erased;         /** Total number of bytes erased from
                                    *  this segment. */
#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */
#define RD_SEGMENT_F_FREE \
        0x2 /**< Free segment on destroy, \
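To see what the new seg_erased bookkeeping accounts for, here is a minimal sketch (not part of the diff) of the erase flow, assuming the rd_buf_init/rd_buf_write/rd_buf_erase signatures declared in rdbuf.h; sizes and offsets are illustrative:

#include <stdio.h>
#include <string.h>
#include "rdbuf.h"

static void buf_erase_demo(void) {
        rd_buf_t buf;
        char payload[64];
        size_t erased;

        memset(payload, 'x', sizeof(payload));
        rd_buf_init(&buf, 1 /* segments */, sizeof(payload));
        rd_buf_write(&buf, payload, sizeof(payload));

        /* Erase 16 bytes starting at absolute offset 8. Each affected
         * segment's seg_erased grows, so rbuf_erased can be rebalanced
         * when the segment is later destroyed. */
        erased = rd_buf_erase(&buf, 8, 16);
        printf("erased %zu bytes, %zu remain\n", erased, rd_buf_len(&buf));

        rd_buf_destroy(&buf);
}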
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018 Magnus Edenhill
 * Copyright (c) 2018-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018 Magnus Edenhill
 * Copyright (c) 2018-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2017 Magnus Edenhill
 * Copyright (c) 2017-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2017 Magnus Edenhill
 * Copyright (c) 2017-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2015 Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -125,16 +125,17 @@
#define be32toh(x) (x)
#define be16toh(x) (x)
#define le32toh(x) \
        ((((x)&0xff) << 24) | (((x)&0xff00) << 8) | (((x)&0xff0000) >> 8) | \
         (((x)&0xff000000) >> 24))
        ((((x) & 0xff) << 24) | (((x) & 0xff00) << 8) | \
         (((x) & 0xff0000) >> 8) | (((x) & 0xff000000) >> 24))
#define le64toh(x) \
        ((((x)&0x00000000000000ffL) << 56) | \
         (((x)&0x000000000000ff00L) << 40) | \
         (((x)&0x0000000000ff0000L) << 24) | \
         (((x)&0x00000000ff000000L) << 8) | (((x)&0x000000ff00000000L) >> 8) | \
         (((x)&0x0000ff0000000000L) >> 24) | \
         (((x)&0x00ff000000000000L) >> 40) | \
         (((x)&0xff00000000000000L) >> 56))
        ((((x) & 0x00000000000000ffL) << 56) | \
         (((x) & 0x000000000000ff00L) << 40) | \
         (((x) & 0x0000000000ff0000L) << 24) | \
         (((x) & 0x00000000ff000000L) << 8) | \
         (((x) & 0x000000ff00000000L) >> 8) | \
         (((x) & 0x0000ff0000000000L) >> 24) | \
         (((x) & 0x00ff000000000000L) >> 40) | \
         (((x) & 0xff00000000000000L) >> 56))
#else
#include <endian.h>
#endif
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2018, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2020, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2020 Magnus Edenhill
 * Copyright (c) 2020-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -31,7 +31,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018, Magnus Edenhill
 * Copyright (c) 2018-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018, Magnus Edenhill
 * Copyright (c) 2018-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2021 Magnus Edenhill
 * Copyright (c) 2021-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -40,6 +40,10 @@
#include <curl/curl.h>
#include "rdhttp.h"

#if WITH_SSL
#include "rdkafka_ssl.h"
#endif

/** Maximum response size, increase as necessary. */
#define RD_HTTP_RESPONSE_SIZE_MAX 1024 * 1024 * 500 /* 500MB */
@ -128,8 +132,145 @@ rd_http_req_write_cb(char *ptr, size_t size, size_t nmemb, void *userdata) {
        return nmemb;
}

rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url) {
#if WITH_SSL
/**
 * @brief Callback function for setting up the SSL_CTX for HTTPS requests.
 *
 * This function sets the default CA paths for the SSL_CTX, and if that fails,
 * it attempts to probe and set a default CA location. If `probe` is forced
 * it skips the default CA paths and directly probes for CA certificates.
 *
 * On Windows, it attempts to load CA root certificates from the
 * configured Windows certificate stores before falling back to the default.
 *
 * @return `CURLE_OK` on success, or `CURLE_SSL_CACERT_BADFILE` on failure.
 */
static CURLcode
rd_http_ssl_ctx_function(CURL *curl, void *sslctx, void *userptr) {
        SSL_CTX *ctx   = (SSL_CTX *)sslctx;
        rd_kafka_t *rk = (rd_kafka_t *)userptr;
        int r          = -1;
        rd_bool_t force_probe =
            !rd_strcmp(rk->rk_conf.https.ca_location, "probe");
        rd_bool_t use_probe = force_probe;

#if WITH_STATIC_LIB_libcrypto
        /* We fallback to `probe` when statically linked. */
        use_probe = rd_true;
#endif

#ifdef _WIN32
        /* Attempt to load CA root certificates from the
         * configured Windows certificate stores. */
        r = rd_kafka_ssl_win_load_cert_stores(rk, "https", ctx,
                                              rk->rk_conf.ssl.ca_cert_stores);
        if (r == 0) {
                rd_kafka_log(rk, LOG_NOTICE, "CERTSTORE",
                             "No CA certificates loaded for `https` from "
                             "Windows certificate stores: "
                             "falling back to default OpenSSL CA paths");
                r = -1;
        } else if (r == -1)
                rd_kafka_log(rk, LOG_NOTICE, "CERTSTORE",
                             "Failed to load CA certificates for `https` from "
                             "Windows certificate stores: "
                             "falling back to default OpenSSL CA paths");

        if (r != -1) {
                rd_kafka_dbg(rk, SECURITY, "SSL",
                             "Successfully loaded CA certificates for `https` "
                             "from Windows certificate stores");
                return CURLE_OK; /* Success, CA certs loaded on Windows */
        }
#endif

        if (!force_probe) {
                /* Previous default behavior: use predefined paths set when
                 * building OpenSSL. */
                char errstr[512];
                r = SSL_CTX_set_default_verify_paths(ctx);
                if (r == 1) {
                        rd_kafka_dbg(rk, SECURITY, "SSL",
                                     "SSL_CTX_set_default_verify_paths() "
                                     "for `https` "
                                     "succeeded");
                        return CURLE_OK; /* Success */
                }

                /* Read error and clear the error stack. */
                rd_kafka_ssl_error0(rk, NULL, "https", errstr, sizeof(errstr));
                rd_kafka_dbg(rk, SECURITY, "SSL",
                             "SSL_CTX_set_default_verify_paths() "
                             "for `https` "
                             "failed: %s",
                             errstr);
        }

        if (use_probe) {
                /* We asked for probing or we're using
                 * a statically linked version of OpenSSL. */

                r = rd_kafka_ssl_probe_and_set_default_ca_location(rk, "https",
                                                                   ctx);
                if (r == 0)
                        return CURLE_OK;
        }

        return CURLE_SSL_CACERT_BADFILE;
}

static void rd_http_ssl_configure(rd_kafka_t *rk, CURL *hreq_curl) {
        rd_bool_t force_probe =
            !rd_strcmp(rk->rk_conf.https.ca_location, "probe");

        if (!force_probe && rk->rk_conf.https.ca_location) {
                rd_bool_t is_dir;
                rd_kafka_dbg(rk, SECURITY, "SSL",
                             "Setting `https` CA certs from "
                             "configured location: %s",
                             rk->rk_conf.https.ca_location);
                if (rd_file_stat(rk->rk_conf.https.ca_location, &is_dir)) {
                        if (is_dir) {
                                curl_easy_setopt(hreq_curl, CURLOPT_CAPATH,
                                                 rk->rk_conf.https.ca_location);
                                curl_easy_setopt(hreq_curl, CURLOPT_CAINFO,
                                                 NULL);
                        } else {
                                curl_easy_setopt(hreq_curl, CURLOPT_CAPATH,
                                                 NULL);
                                curl_easy_setopt(hreq_curl, CURLOPT_CAINFO,
                                                 rk->rk_conf.https.ca_location);
                        }
                } else {
                        /* Path doesn't exist, don't set any trusted
                         * certificate. */
                        curl_easy_setopt(hreq_curl, CURLOPT_CAINFO, NULL);
                        curl_easy_setopt(hreq_curl, CURLOPT_CAPATH, NULL);
                }
        } else if (!force_probe && rk->rk_conf.https.ca_pem) {
#if CURL_AT_LEAST_VERSION(7, 77, 0)
                struct curl_blob ca_blob = {
                    .data  = rk->rk_conf.https.ca_pem,
                    .len   = strlen(rk->rk_conf.https.ca_pem),
                    .flags = CURL_BLOB_COPY};
                rd_kafka_dbg(rk, SECURITY, "SSL",
                             "Setting `https` CA certs from "
                             "configured PEM string");
                curl_easy_setopt(hreq_curl, CURLOPT_CAINFO_BLOB, &ca_blob);
#endif
                /* Only the blob should be set, no default paths. */
                curl_easy_setopt(hreq_curl, CURLOPT_CAINFO, NULL);
                curl_easy_setopt(hreq_curl, CURLOPT_CAPATH, NULL);
        } else {
                curl_easy_setopt(hreq_curl, CURLOPT_SSL_CTX_FUNCTION,
                                 rd_http_ssl_ctx_function);
                curl_easy_setopt(hreq_curl, CURLOPT_SSL_CTX_DATA, rk);
        }
}
#endif

rd_http_error_t *
rd_http_req_init(rd_kafka_t *rk, rd_http_req_t *hreq, const char *url) {
        memset(hreq, 0, sizeof(*hreq));

        hreq->hreq_curl = curl_easy_init();
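Outside the library, the same CA-selection decision can be sketched with plain libcurl; the https_conf struct below is a hypothetical stand-in for the rk_conf.https fields used above, not librdkafka API:

#include <string.h>
#include <sys/stat.h>
#include <curl/curl.h>

/* Hypothetical stand-in for the https.* configuration shown above. */
struct https_conf {
        const char *ca_location; /* file, directory, or "probe" */
};

static void set_ca_options(CURL *curl, const struct https_conf *conf) {
        struct stat st;

        if (conf->ca_location && strcmp(conf->ca_location, "probe") != 0 &&
            stat(conf->ca_location, &st) == 0) {
                if (S_ISDIR(st.st_mode))
                        /* Directory of hashed certs -> CURLOPT_CAPATH */
                        curl_easy_setopt(curl, CURLOPT_CAPATH,
                                         conf->ca_location);
                else
                        /* Single bundle file -> CURLOPT_CAINFO */
                        curl_easy_setopt(curl, CURLOPT_CAINFO,
                                         conf->ca_location);
        }
        /* Otherwise fall back to an SSL_CTX callback, as the diff does. */
}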
@ -139,8 +280,15 @@ rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url) {
        hreq->hreq_buf = rd_buf_new(1, 1024);

        curl_easy_setopt(hreq->hreq_curl, CURLOPT_URL, url);
#if CURL_AT_LEAST_VERSION(7, 85, 0)
        curl_easy_setopt(hreq->hreq_curl, CURLOPT_PROTOCOLS_STR, "http,https");
#else
        /* As of 06/10/2025 Debian 10 and CentOS Stream 9 ship with
         * older CURL versions, remove this condition once they're not
         * supported anymore. */
        curl_easy_setopt(hreq->hreq_curl, CURLOPT_PROTOCOLS,
                         CURLPROTO_HTTP | CURLPROTO_HTTPS);
#endif
        curl_easy_setopt(hreq->hreq_curl, CURLOPT_MAXREDIRS, 16);
        curl_easy_setopt(hreq->hreq_curl, CURLOPT_TIMEOUT, 30);
        curl_easy_setopt(hreq->hreq_curl, CURLOPT_ERRORBUFFER,
@ -150,6 +298,10 @@ rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url) {
                         rd_http_req_write_cb);
        curl_easy_setopt(hreq->hreq_curl, CURLOPT_WRITEDATA, (void *)hreq);

#if WITH_SSL
        rd_http_ssl_configure(rk, hreq->hreq_curl);
#endif

        return NULL;
}
@ -200,13 +352,14 @@ const char *rd_http_req_get_content_type(rd_http_req_t *hreq) {
 * by calling rd_http_error_destroy(). In case of HTTP error the \p *rbufp
 * may be filled with the error response.
 */
rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp) {
rd_http_error_t *
rd_http_get(rd_kafka_t *rk, const char *url, rd_buf_t **rbufp) {
        rd_http_req_t hreq;
        rd_http_error_t *herr;

        *rbufp = NULL;

        herr = rd_http_req_init(&hreq, url);
        herr = rd_http_req_init(rk, &hreq, url);
        if (unlikely(herr != NULL))
                return herr;
@ -269,6 +422,7 @@ static rd_bool_t rd_http_is_failure_temporary(int error_code) {
        switch (error_code) {
        case 408: /**< Request timeout */
        case 425: /**< Too early */
        case 429: /**< Too many requests */
        case 500: /**< Internal server error */
        case 502: /**< Bad gateway */
        case 503: /**< Service unavailable */
@ -309,7 +463,7 @@ rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk,
        size_t len;
        const char *content_type;

        herr = rd_http_req_init(&hreq, url);
        herr = rd_http_req_init(rk, &hreq, url);
        if (unlikely(herr != NULL))
                return herr;
@ -374,7 +528,8 @@ rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk,
 *
 * Same error semantics as rd_http_get().
 */
rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp) {
rd_http_error_t *
rd_http_get_json(rd_kafka_t *rk, const char *url, cJSON **jsonp) {
        rd_http_req_t hreq;
        rd_http_error_t *herr;
        rd_slice_t slice;
@ -385,7 +540,7 @@ rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp) {

        *jsonp = NULL;

        herr = rd_http_req_init(&hreq, url);
        herr = rd_http_req_init(rk, &hreq, url);
        if (unlikely(herr != NULL))
                return herr;
@ -460,19 +615,21 @@ int unittest_http(void) {
        cJSON *json, *jval;
        rd_http_error_t *herr;
        rd_bool_t empty;
        rd_kafka_t *rk;

        if (!base_url || !*base_url)
                RD_UT_SKIP("RD_UT_HTTP_URL environment variable not set");

        RD_UT_BEGIN();

        rk             = rd_calloc(1, sizeof(*rk));
        error_url_size = strlen(base_url) + strlen("/error") + 1;
        error_url      = rd_alloca(error_url_size);
        rd_snprintf(error_url, error_url_size, "%s/error", base_url);

        /* Try the base url first, parse its JSON and extract a key-value. */
        json = NULL;
        herr = rd_http_get_json(base_url, &json);
        herr = rd_http_get_json(rk, base_url, &json);
        RD_UT_ASSERT(!herr, "Expected get_json(%s) to succeed, got: %s",
                     base_url, herr->errstr);
@ -492,7 +649,7 @@ int unittest_http(void) {

        /* Try the error URL, verify error code. */
        json = NULL;
        herr = rd_http_get_json(error_url, &json);
        herr = rd_http_get_json(rk, error_url, &json);
        RD_UT_ASSERT(herr != NULL, "Expected get_json(%s) to fail", error_url);
        RD_UT_ASSERT(herr->code >= 400,
                     "Expected get_json(%s) error code >= "
@ -506,6 +663,7 @@ int unittest_http(void) {
        if (json)
                cJSON_Delete(json);
        rd_http_error_destroy(herr);
        rd_free(rk);

        RD_UT_PASS();
}
@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2021 Magnus Edenhill
 * Copyright (c) 2021-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -42,8 +42,9 @@ typedef struct rd_http_error_s {

void rd_http_error_destroy(rd_http_error_t *herr);

rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp);
rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp);
rd_http_error_t *rd_http_get(rd_kafka_t *rk, const char *url, rd_buf_t **rbufp);
rd_http_error_t *
rd_http_get_json(rd_kafka_t *rk, const char *url, cJSON **jsonp);

void rd_http_global_init(void);
@ -62,7 +63,8 @@ typedef struct rd_http_req_s {
                             * write to. */
} rd_http_req_t;

rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url);
rd_http_error_t *
rd_http_req_init(rd_kafka_t *rk, rd_http_req_t *hreq, const char *url);
rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq);
rd_http_error_t *rd_http_parse_json(rd_http_req_t *hreq, cJSON **jsonp);
rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk,
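Callers now thread a client handle through these helpers so HTTPS CA configuration can be picked up. A minimal sketch of the updated call shape (not part of the diff; the URL is an illustrative assumption):

#include "rdhttp.h"

static void fetch_json_demo(rd_kafka_t *rk) {
        cJSON *json = NULL;
        rd_http_error_t *herr;

        /* The rk handle supplies the https.* CA configuration to libcurl. */
        herr = rd_http_get_json(rk, "https://example.com/metadata.json",
                                &json);
        if (herr) {
                /* herr->code and herr->errstr describe the failure. */
                rd_http_error_destroy(herr);
                return;
        }

        cJSON_Delete(json);
}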
@ -1,7 +1,8 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018 Magnus Edenhill
 * Copyright (c) 2018-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -30,6 +31,7 @@
#define _RDINTERVAL_H_

#include "rd.h"
#include "rdrand.h"

typedef struct rd_interval_s {
        rd_ts_t ri_ts_last; /* last interval timestamp */
@ -109,6 +111,22 @@ static RD_INLINE RD_UNUSED void rd_interval_reset_to_now(rd_interval_t *ri,
        ri->ri_backoff = 0;
}

/**
 * Reset the interval to 'now' with the given backoff ms and max_jitter as
 * percentage. The backoff is given just for absolute jitter calculation.
 * If now is 0, the time will be gathered automatically.
 */
static RD_INLINE RD_UNUSED void
rd_interval_reset_to_now_with_jitter(rd_interval_t *ri,
                                     rd_ts_t now,
                                     int64_t backoff_ms,
                                     int max_jitter) {
        rd_interval_reset_to_now(ri, now);
        /* We are multiplying by 10 as (backoff_ms * percent * 1000)/100 ->
         * backoff_ms * jitter * 10 */
        ri->ri_backoff = backoff_ms * rd_jitter(-max_jitter, max_jitter) * 10;
}

/**
 * Back off the next interval by `backoff_us` microseconds.
 */
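To make the unit conversion above concrete, a worked check with illustrative values:

/* Example: rd_interval_reset_to_now_with_jitter(ri, 0, 1000, 20)
 * with backoff_ms = 1000 and max_jitter = 20 (percent).
 * If rd_jitter(-20, 20) returns -5 (i.e. -5%):
 *   ri_backoff = 1000 * -5 * 10 = -50000 us = -50 ms,
 * which is -5% of the 1 s backoff expressed in microseconds,
 * matching (backoff_ms * percent * 1000) / 100. */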
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,7 +1,8 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018 Magnus Edenhill
 * Copyright (c) 2018-2022, Magnus Edenhill
 *               2023, Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -31,10 +32,23 @@


#include "rdstring.h"
#include "rdmap.h"
#include "rdkafka_error.h"
#include "rdkafka_confval.h"

#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10101000L
#include <openssl/rand.h>
#endif

#if WITH_SSL
typedef struct rd_kafka_broker_s rd_kafka_broker_t;
extern int rd_kafka_ssl_hmac(rd_kafka_broker_t *rkb,
                             const EVP_MD *evp,
                             const rd_chariov_t *in,
                             const rd_chariov_t *salt,
                             int itcnt,
                             rd_chariov_t *out);
#endif

/**
 * @brief Common AdminOptions type used for all admin APIs.
@ -69,13 +83,7 @@ struct rd_kafka_AdminOptions_s {
                               *   CreateTopics
                               *   CreatePartitions
                               *   AlterConfigs
                               */

        rd_kafka_confval_t incremental; /**< BOOL: Incremental rather than
                                         *   absolute application
                                         *   of config.
                                         *   Valid for:
                                         *     AlterConfigs
                                         *     IncrementalAlterConfigs
                                         */

        rd_kafka_confval_t broker; /**< INT: Explicitly override
@ -91,6 +99,14 @@ struct rd_kafka_AdminOptions_s {
                               *   Valid for:
                               *     ListConsumerGroupOffsets
                               */
        rd_kafka_confval_t
            include_authorized_operations; /**< BOOL: Whether broker should
                                            *   return authorized operations.
                                            *   Valid for:
                                            *     DescribeConsumerGroups
                                            *     DescribeCluster
                                            *     DescribeTopics
                                            */

        rd_kafka_confval_t
            match_consumer_group_states; /**< PTR: list of consumer group states
@ -98,6 +114,19 @@ struct rd_kafka_AdminOptions_s {
                                         *   to query for.
                                         *   Valid for: ListConsumerGroups.
                                         */

        rd_kafka_confval_t
            match_consumer_group_types; /**< PTR: list of consumer group types
                                         *   to query for.
                                         *   Valid for: ListConsumerGroups.
                                         */

        rd_kafka_confval_t
            isolation_level; /**< INT: Isolation Level needed for ListOffsets
                              *   to query for.
                              *   Default set to
                              *   RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED
                              */

        rd_kafka_confval_t opaque; /**< PTR: Application opaque.
                                    *   Valid for all. */
};
@ -188,13 +217,6 @@ struct rd_kafka_NewPartitions_s {
 * @{
 */

/* KIP-248 */
typedef enum rd_kafka_AlterOperation_t {
        RD_KAFKA_ALTER_OP_ADD    = 0,
        RD_KAFKA_ALTER_OP_SET    = 1,
        RD_KAFKA_ALTER_OP_DELETE = 2,
} rd_kafka_AlterOperation_t;

struct rd_kafka_ConfigEntry_s {
        rd_strtup_t *kv; /**< Name/Value pair */
@ -202,7 +224,8 @@ struct rd_kafka_ConfigEntry_s {

        /* Attributes: this is a struct for easy copying */
        struct {
                rd_kafka_AlterOperation_t operation; /**< Operation */
                /** Operation type, used for IncrementalAlterConfigs */
                rd_kafka_AlterConfigOpType_t op_type;
                rd_kafka_ConfigSource_t source; /**< Config source */
                rd_bool_t is_readonly; /**< Value is read-only (on broker) */
                rd_bool_t is_default;  /**< Value is at its default */
@ -250,12 +273,48 @@ struct rd_kafka_AlterConfigs_result_s {
        rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */
};

struct rd_kafka_IncrementalAlterConfigs_result_s {
        rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */
};

struct rd_kafka_ConfigResource_result_s {
        rd_list_t resources; /**< Type (struct rd_kafka_ConfigResource *):
                              *   List of config resources, sans config
                              *   but with response error values. */
};

/**
 * @brief Resource type specific to config apis.
 */
typedef enum rd_kafka_ConfigResourceType_t {
        RD_KAFKA_CONFIG_RESOURCE_UNKNOWN = 0,
        RD_KAFKA_CONFIG_RESOURCE_TOPIC   = 2,
        RD_KAFKA_CONFIG_RESOURCE_BROKER  = 4,
        RD_KAFKA_CONFIG_RESOURCE_GROUP   = 32,
} rd_kafka_ConfigResourceType_t;

/**
 * @brief Maps `rd_kafka_ResourceType_t` to `rd_kafka_ConfigResourceType_t`
 * for Config Apis. We are incorrectly using `rd_kafka_ResourceType_t` in
 * both Config Apis and ACL Apis. So, we need this function to map the
 * resource type internally to `rd_kafka_ConfigResourceType_t`. For example,
 * the enum value for `GROUP` is 32 in Config Apis, but it is 3 for ACL Apis.
 */
rd_kafka_ConfigResourceType_t
rd_kafka_ResourceType_to_ConfigResourceType(rd_kafka_ResourceType_t restype);

/**
 * @brief Maps `rd_kafka_ConfigResourceType_t` to `rd_kafka_ResourceType_t`
 * for Config Apis. We are incorrectly using `rd_kafka_ResourceType_t` in
 * both Config Apis and ACL Apis. So, we need this function to map the
 * `rd_kafka_ConfigResourceType_t` internally to
 * `rd_kafka_ResourceType_t`. For example, the enum value for `GROUP` is 32 in
 * Config Apis, but it is 3 for ACL Apis.
 */
rd_kafka_ResourceType_t rd_kafka_ConfigResourceType_to_ResourceType(
    rd_kafka_ConfigResourceType_t config_resource_type);


/**@}*/
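One plausible shape of the forward mapping declared above, shown purely as an illustrative sketch (the real implementation lives in the suppressed rdkafka_admin.c diff):

/* Hypothetical sketch: map the public ACL-style resource type onto the
 * wire value used by the Config APIs (GROUP is 3 in ACL APIs, 32 here). */
rd_kafka_ConfigResourceType_t
rd_kafka_ResourceType_to_ConfigResourceType(rd_kafka_ResourceType_t restype) {
        switch (restype) {
        case RD_KAFKA_RESOURCE_TOPIC:
                return RD_KAFKA_CONFIG_RESOURCE_TOPIC;
        case RD_KAFKA_RESOURCE_BROKER:
                return RD_KAFKA_CONFIG_RESOURCE_BROKER;
        case RD_KAFKA_RESOURCE_GROUP:
                return RD_KAFKA_CONFIG_RESOURCE_GROUP;
        default:
                return RD_KAFKA_CONFIG_RESOURCE_UNKNOWN;
        }
}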
@ -298,6 +357,47 @@ struct rd_kafka_DeleteRecords_s {

/**@}*/

/**
 * @name ListConsumerGroupOffsets
 * @{
 */

/**
 * @brief ListConsumerGroupOffsets result
 */
struct rd_kafka_ListConsumerGroupOffsets_result_s {
        rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
};

struct rd_kafka_ListConsumerGroupOffsets_s {
        char *group_id; /**< Points to data */
        rd_kafka_topic_partition_list_t *partitions;
        char data[1]; /**< The group id is allocated along with
                       *   the struct here. */
};

/**@}*/

/**
 * @name AlterConsumerGroupOffsets
 * @{
 */

/**
 * @brief AlterConsumerGroupOffsets result
 */
struct rd_kafka_AlterConsumerGroupOffsets_result_s {
        rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
};

struct rd_kafka_AlterConsumerGroupOffsets_s {
        char *group_id; /**< Points to data */
        rd_kafka_topic_partition_list_t *partitions;
        char data[1]; /**< The group id is allocated along with
                       *   the struct here. */
};

/**@}*/

/**
 * @name DeleteConsumerGroupOffsets
@ -320,6 +420,24 @@ struct rd_kafka_DeleteConsumerGroupOffsets_s {

/**@}*/

/**
 * @name ListOffsets
 * @{
 */

/**
 * @struct ListOffsets result about a single partition
 */
struct rd_kafka_ListOffsetsResultInfo_s {
        rd_kafka_topic_partition_t *topic_partition;
        int64_t timestamp;
};

rd_kafka_ListOffsetsResultInfo_t *
rd_kafka_ListOffsetsResultInfo_new(rd_kafka_topic_partition_t *rktpar,
                                   rd_ts_t timestamp);
/**@}*/

/**
 * @name CreateAcls
 * @{
@ -357,50 +475,6 @@ struct rd_kafka_DeleteAcls_result_response_s {

/**@}*/


/**
 * @name AlterConsumerGroupOffsets
 * @{
 */

/**
 * @brief AlterConsumerGroupOffsets result
 */
struct rd_kafka_AlterConsumerGroupOffsets_result_s {
        rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
};

struct rd_kafka_AlterConsumerGroupOffsets_s {
        char *group_id; /**< Points to data */
        rd_kafka_topic_partition_list_t *partitions;
        char data[1]; /**< The group id is allocated along with
                       *   the struct here. */
};

/**@}*/


/**
 * @name ListConsumerGroupOffsets
 * @{
 */

/**
 * @brief ListConsumerGroupOffsets result
 */
struct rd_kafka_ListConsumerGroupOffsets_result_s {
        rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
};

struct rd_kafka_ListConsumerGroupOffsets_s {
        char *group_id; /**< Points to data */
        rd_kafka_topic_partition_list_t *partitions;
        char data[1]; /**< The group id is allocated along with
                       *   the struct here. */
};

/**@}*/

/**
 * @name ListConsumerGroups
 * @{
@ -414,6 +488,7 @@ struct rd_kafka_ConsumerGroupListing_s {
        /** Is it a simple consumer group? That means empty protocol_type. */
        rd_bool_t is_simple_consumer_group;
        rd_kafka_consumer_group_state_t state; /**< Consumer group state. */
        rd_kafka_consumer_group_type_t type;   /**< Consumer group type. */
};
@ -452,6 +527,9 @@ struct rd_kafka_MemberDescription_s {
        char *group_instance_id;                /**< Group instance id */
        char *host;                             /**< Group member host */
        rd_kafka_MemberAssignment_t assignment; /**< Member assignment */
        rd_kafka_MemberAssignment_t
            *target_assignment; /**< Target assignment. `NULL` for `classic`
                                 *   protocol */
};

/**
@ -471,12 +549,113 @@ struct rd_kafka_ConsumerGroupDescription_s {
        char *partition_assignor;
        /** Consumer group state. */
        rd_kafka_consumer_group_state_t state;
        /** Consumer group type. */
        rd_kafka_consumer_group_type_t type;
        /** Consumer group coordinator. */
        rd_kafka_Node_t *coordinator;
        /** Count of operations allowed for topic. -1 indicates operations not
         *  requested. */
        int authorized_operations_cnt;
        /** Operations allowed for topic. May be NULL if operations were not
         *  requested */
        rd_kafka_AclOperation_t *authorized_operations;
        /** Group specific error. */
        rd_kafka_error_t *error;
};

/**@}*/

/**
 * @name DescribeTopics
 * @{
 */

/**
 * @brief TopicCollection contains a list of topics.
 */
struct rd_kafka_TopicCollection_s {
        char **topics;     /**< List of topic names. */
        size_t topics_cnt; /**< Count of topic names. */
};

/**
 * @brief TopicPartition result type in DescribeTopics result.
 */
struct rd_kafka_TopicPartitionInfo_s {
        int partition;              /**< Partition id. */
        rd_kafka_Node_t *leader;    /**< Leader of the partition. */
        size_t isr_cnt;             /**< Count of insync replicas. */
        rd_kafka_Node_t **isr;      /**< List of in sync replica nodes. */
        size_t replica_cnt;         /**< Count of partition replicas. */
        rd_kafka_Node_t **replicas; /**< List of replica nodes. */
};

/**
 * @struct DescribeTopics result
 */
struct rd_kafka_TopicDescription_s {
        char *topic;              /**< Topic name */
        rd_kafka_Uuid_t topic_id; /**< Topic Id */
        int partition_cnt;        /**< Number of partitions in \p partitions */
        rd_bool_t is_internal;    /**< Is the topic internal to Kafka? */
        rd_kafka_TopicPartitionInfo_t **partitions; /**< Partitions */
        rd_kafka_error_t *error; /**< Topic error reported by broker */
        int authorized_operations_cnt; /**< Count of operations allowed for
                                        *   topic. -1 indicates operations not
                                        *   requested. */
        rd_kafka_AclOperation_t
            *authorized_operations; /**< Operations allowed for topic. May be
                                     *   NULL if operations were not requested */
};

/**@}*/

/**
 * @name DescribeCluster
 * @{
 */
/**
 * @struct DescribeCluster result - internal type.
 */
typedef struct rd_kafka_ClusterDescription_s {
        char *cluster_id;            /**< Cluster id */
        rd_kafka_Node_t *controller; /**< Current controller. */
        size_t node_cnt;             /**< Count of brokers in the cluster. */
        rd_kafka_Node_t **nodes;     /**< Brokers in the cluster. */
        int authorized_operations_cnt; /**< Count of operations allowed for
                                        *   cluster. -1 indicates operations
                                        *   not requested. */
        rd_kafka_AclOperation_t
            *authorized_operations; /**< Operations allowed for cluster. May be
                                     *   NULL if operations were not requested */

} rd_kafka_ClusterDescription_t;

/**@}*/

/**
 * @name ElectLeaders
 * @{
 */

/**
 * @struct ElectLeaders request object
 */
struct rd_kafka_ElectLeaders_s {
        rd_kafka_ElectionType_t election_type; /**< Election Type */
        rd_kafka_topic_partition_list_t
            *partitions; /**< TopicPartitions for election */
};

/**
 * @struct ElectLeaders result object
 */
typedef struct rd_kafka_ElectLeadersResult_s {
        rd_list_t partitions; /**< Type (rd_kafka_topic_partition_result_t *) */
} rd_kafka_ElectLeadersResult_t;

/**@}*/

#endif /* _RDKAFKA_ADMIN_H_ */
@ -1,7 +1,8 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2020 Magnus Edenhill
 * Copyright (c) 2020-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@ -135,7 +136,9 @@ rd_kafka_assignment_apply_offsets(rd_kafka_t *rk,
        rd_kafka_topic_partition_t *rktpar;

        RD_KAFKA_TPLIST_FOREACH(rktpar, offsets) {
                rd_kafka_toppar_t *rktp = rktpar->_private; /* May be NULL */
                /* May be NULL, borrow ref. */
                rd_kafka_toppar_t *rktp =
                    rd_kafka_topic_partition_toppar(rk, rktpar);

                if (!rd_kafka_topic_partition_list_del(
                        rk->rk_consumer.assignment.queried, rktpar->topic,
@ -150,8 +153,30 @@ rd_kafka_assignment_apply_offsets(rd_kafka_t *rk,
                        continue;
                }

                if (err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT ||
                    rktpar->err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) {
                if (err == RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH ||
                    rktpar->err == RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH) {
                        rd_kafka_topic_partition_t *rktpar_copy;

                        rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
                                     "Adding %s [%" PRId32
                                     "] back to pending "
                                     "list because of stale member epoch",
                                     rktpar->topic, rktpar->partition);

                        rktpar_copy = rd_kafka_topic_partition_list_add_copy(
                            rk->rk_consumer.assignment.pending, rktpar);
                        /* Need to reset offset to STORED to query for
                         * the committed offset again. If the offset is
                         * kept INVALID then auto.offset.reset will be
                         * triggered.
                         *
                         * Not necessary if err is UNSTABLE_OFFSET_COMMIT
                         * because the buffer is retried there. */
                        rktpar_copy->offset = RD_KAFKA_OFFSET_STORED;

                } else if (err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT ||
                           rktpar->err ==
                               RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) {
                        /* Ongoing transactions are blocking offset retrieval.
                         * This is typically retried from the OffsetFetch
                         * handler but we can come here if the assignment
|
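The STORED-vs-INVALID distinction in the comment above maps to librdkafka's public logical offsets. A minimal sketch of the two semantics on a partition list entry, using only the public API ("mytopic" is a placeholder name):

#include <librdkafka/rdkafka.h>

/* RD_KAFKA_OFFSET_STORED asks the consumer to (re)query the committed
 * offset for the partition; RD_KAFKA_OFFSET_INVALID leaves the start
 * point unset so the auto.offset.reset policy decides instead. */
static void offset_semantics_example(void) {
        rd_kafka_topic_partition_list_t *parts =
            rd_kafka_topic_partition_list_new(2);

        /* Will be resolved from the committed offset in the group. */
        rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset =
            RD_KAFKA_OFFSET_STORED;

        /* No usable offset: auto.offset.reset will kick in. */
        rd_kafka_topic_partition_list_add(parts, "mytopic", 1)->offset =
            RD_KAFKA_OFFSET_INVALID;

        rd_kafka_topic_partition_list_destroy(parts);
}
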
@ -207,7 +232,9 @@ rd_kafka_assignment_apply_offsets(rd_kafka_t *rk,
/* Do nothing for request-level errors (err is set). */
}

if (offsets->cnt > 0)
/* In case of stale member epoch we retry to serve the
* assignment only after a successful ConsumerGroupHeartbeat. */
if (offsets->cnt > 0 && err != RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH)
rd_kafka_assignment_serve(rk);
}

@ -271,18 +298,32 @@ static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk,
return;
}



if (err) {
rd_kafka_dbg(rk, CGRP, "OFFSET",
switch (err) {
case RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH:
rk->rk_cgrp->rkcg_consumer_flags |=
RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING;
rd_kafka_cgrp_consumer_expedite_next_heartbeat(
rk->rk_cgrp,
"OffsetFetch error: Stale member epoch");
break;
case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
rd_kafka_cgrp_consumer_expedite_next_heartbeat(
rk->rk_cgrp, "OffsetFetch error: Unknown member");
break;
default:
rd_kafka_dbg(
rk, CGRP, "OFFSET",
"Offset fetch error for %d partition(s): %s",
offsets->cnt, rd_kafka_err2str(err));
rd_kafka_consumer_err(
rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL,
NULL, RD_KAFKA_OFFSET_INVALID,
rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0,
NULL, NULL, RD_KAFKA_OFFSET_INVALID,
"Failed to fetch committed offsets for "
"%d partition(s) in group \"%s\": %s",
offsets->cnt, rk->rk_group_id->str, rd_kafka_err2str(err));
offsets->cnt, rk->rk_group_id->str,
rd_kafka_err2str(err));
}
}

/* Apply the fetched offsets to the assignment */
@ -302,7 +343,9 @@ static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) {
int valid_offsets = 0;

RD_KAFKA_TPLIST_FOREACH(rktpar, rk->rk_consumer.assignment.removed) {
rd_kafka_toppar_t *rktp = rktpar->_private; /* Borrow ref */
rd_kafka_toppar_t *rktp =
rd_kafka_topic_partition_ensure_toppar(
rk, rktpar, rd_true); /* Borrow ref */
int was_pending, was_queried;

/* Remove partition from pending and querying lists,
@ -333,17 +376,21 @@ static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) {

rd_kafka_toppar_lock(rktp);

/* Save the currently stored offset on .removed
/* Save the currently stored offset and epoch on .removed
* so it will be committed below. */
rktpar->offset = rktp->rktp_stored_offset;
rd_kafka_topic_partition_set_from_fetch_pos(
rktpar, rktp->rktp_stored_pos);
rd_kafka_topic_partition_set_metadata_from_rktp_stored(rktpar,
rktp);
valid_offsets += !RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset);

/* Reset the stored offset to invalid so that
* a manual offset-less commit() or the auto-committer
* will not commit a stored offset from a previous
* assignment (issue #2782). */
rd_kafka_offset_store0(rktp, RD_KAFKA_OFFSET_INVALID, rd_true,
RD_DONT_LOCK);
rd_kafka_offset_store0(
rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1), NULL,
0, rd_true, RD_DONT_LOCK);

/* Partition is no longer desired */
rd_kafka_toppar_desired_del(rktp);
@ -422,7 +469,9 @@ static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {
for (i = rk->rk_consumer.assignment.pending->cnt - 1; i >= 0; i--) {
rd_kafka_topic_partition_t *rktpar =
&rk->rk_consumer.assignment.pending->elems[i];
rd_kafka_toppar_t *rktp = rktpar->_private; /* Borrow ref */
/* Borrow ref */
rd_kafka_toppar_t *rktp =
rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);

rd_assert(!rktp->rktp_started);

@ -443,9 +492,11 @@ static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {

rd_kafka_dbg(rk, CGRP, "SRVPEND",
"Starting pending assigned partition "
"%s [%" PRId32 "] at offset %s",
"%s [%" PRId32 "] at %s",
rktpar->topic, rktpar->partition,
rd_kafka_offset2str(rktpar->offset));
rd_kafka_fetch_pos2str(
rd_kafka_topic_partition_get_fetch_pos(
rktpar)));

/* Reset the (lib) pause flag which may have been set by
* the cgrp when scheduling the rebalance callback. */
@ -457,9 +508,10 @@ static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {
rktp->rktp_started = rd_true;
rk->rk_consumer.assignment.started_cnt++;

rd_kafka_toppar_op_fetch_start(rktp, rktpar->offset,
rk->rk_consumer.q,
RD_KAFKA_NO_REPLYQ);
rd_kafka_toppar_op_fetch_start(
rktp,
rd_kafka_topic_partition_get_fetch_pos(rktpar),
rk->rk_consumer.q, RD_KAFKA_NO_REPLYQ);


} else if (can_query_offsets) {
@ -529,7 +581,8 @@ static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {
partitions_to_query->cnt);

rd_kafka_OffsetFetchRequest(
coord, rk->rk_group_id->str, partitions_to_query,
coord, rk->rk_group_id->str, partitions_to_query, rd_false,
-1, NULL,
rk->rk_conf.isolation_level ==
RD_KAFKA_READ_COMMITTED /*require_stable_offsets*/,
0, /* Timeout */
@ -733,8 +786,9 @@ rd_kafka_assignment_add(rd_kafka_t *rk,

/* Reset the stored offset to INVALID to avoid the race
* condition described in rdkafka_offset.h */
rd_kafka_offset_store0(rktp, RD_KAFKA_OFFSET_INVALID,
rd_true /* force */, RD_DONT_LOCK);
rd_kafka_offset_store0(
rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1), NULL,
0, rd_true /* force */, RD_DONT_LOCK);

rd_kafka_toppar_unlock(rktp);
}

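The require_stable_offsets argument passed to rd_kafka_OffsetFetchRequest() above is derived from the consumer's isolation.level. A minimal configuration sketch using the public API (assuming the standard "isolation.level" property; error handling kept short):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* A read_committed consumer will also request stable (transaction-safe)
 * committed offsets when fetching them, matching the
 * rk_conf.isolation_level check above. */
static rd_kafka_conf_t *make_read_committed_conf(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "isolation.level", "read_committed",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return NULL;
        }
        return conf;
}
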
@ -1,7 +1,7 @@
/*
* librdkafka - The Apache Kafka C/C++ library
*
* Copyright (c) 2020 Magnus Edenhill
* Copyright (c) 2020-2022, Magnus Edenhill
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,7 +1,8 @@
/*
* librdkafka - The Apache Kafka C/C++ library
*
* Copyright (c) 2015 Magnus Edenhill
* Copyright (c) 2015-2022, Magnus Edenhill
* 2023 Confluent Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -59,6 +60,9 @@ void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm) {
if (rkgm->rkgm_member_metadata)
rd_kafkap_bytes_destroy(rkgm->rkgm_member_metadata);

if (rkgm->rkgm_rack_id)
rd_kafkap_str_destroy(rkgm->rkgm_rack_id);

memset(rkgm, 0, sizeof(*rkgm));
}

@ -106,7 +110,9 @@ rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
const rd_list_t *topics,
const void *userdata,
size_t userdata_size,
const rd_kafka_topic_partition_list_t *owned_partitions) {
const rd_kafka_topic_partition_list_t *owned_partitions,
int generation,
const rd_kafkap_str_t *rack_id) {

rd_kafka_buf_t *rkbuf;
rd_kafkap_bytes_t *kbytes;
@ -124,12 +130,14 @@ rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
* OwnedPartitions => [Topic Partitions] // added in v1
* Topic => string
* Partitions => [int32]
* GenerationId => int32 // added in v2
* RackId => string // added in v3
*/

rkbuf = rd_kafka_buf_new(1, 100 + (topic_cnt * 100) + userdata_size);

/* Version */
rd_kafka_buf_write_i16(rkbuf, 1);
rd_kafka_buf_write_i16(rkbuf, 3);
rd_kafka_buf_write_i32(rkbuf, topic_cnt);
RD_LIST_FOREACH(tinfo, topics, i)
rd_kafka_buf_write_str(rkbuf, tinfo->topic, -1);
@ -144,13 +152,22 @@ rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
/* If there are no owned partitions, this is specified as an
* empty array, not NULL. */
rd_kafka_buf_write_i32(rkbuf, 0); /* Topic count */
else
else {
const rd_kafka_topic_partition_field_t fields[] = {
RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
RD_KAFKA_TOPIC_PARTITION_FIELD_END};
rd_kafka_buf_write_topic_partitions(
rkbuf, owned_partitions,
rd_false /*don't skip invalid offsets*/,
rd_false /*any offset*/, rd_false /*don't write offsets*/,
rd_false /*don't write epoch*/,
rd_false /*don't write metadata*/);
rd_false /*any offset*/, rd_false /*don't use topic id*/,
rd_true /*use topic name*/, fields);
}

/* Following data is ignored by consumer version < 2 */
rd_kafka_buf_write_i32(rkbuf, generation);

/* Following data is ignored by consumer version < 3 */
rd_kafka_buf_write_kstr(rkbuf, rack_id);

/* Get binary buffer and allocate a new Kafka Bytes with a copy. */
rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);

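The ConsumerProtocolMemberMetadata layout written above (version, subscribed topics, userdata, owned partitions, generation, rack) is plain big-endian Kafka wire format. A self-contained sketch of the primitives involved, using simplified stand-ins for the rd_kafka_buf_write_*() helpers (no bounds checking; the topic and rack names are placeholders):

#include <stdint.h>
#include <string.h>

/* Simplified stand-ins for rd_kafka_buf_write_i16/i32/str():
 * big-endian integers, strings as int16 length + bytes. */
static uint8_t *write_i16(uint8_t *p, int16_t v) {
        *p++ = (uint8_t)(v >> 8);
        *p++ = (uint8_t)v;
        return p;
}

static uint8_t *write_i32(uint8_t *p, int32_t v) {
        *p++ = (uint8_t)(v >> 24);
        *p++ = (uint8_t)(v >> 16);
        *p++ = (uint8_t)(v >> 8);
        *p++ = (uint8_t)v;
        return p;
}

static uint8_t *write_str(uint8_t *p, const char *s) {
        int16_t len = (int16_t)strlen(s);
        p = write_i16(p, len);
        memcpy(p, s, (size_t)len);
        return p + len;
}

/* Version 3 metadata for one subscribed topic, empty userdata, no owned
 * partitions, default generation, one rack: mirrors the write order in
 * rd_kafka_consumer_protocol_member_metadata_new() above. */
static size_t encode_member_metadata_v3(uint8_t *buf) {
        uint8_t *p = buf;
        p = write_i16(p, 3);          /* Version */
        p = write_i32(p, 1);          /* Subscribed topic count */
        p = write_str(p, "mytopic");  /* Topic (placeholder) */
        p = write_i32(p, 0);          /* UserData length (empty) */
        p = write_i32(p, 0);          /* OwnedPartitions topic count */
        p = write_i32(p, -1);         /* GenerationId (default) */
        p = write_str(p, "rack-a");   /* RackId (placeholder) */
        return (size_t)(p - buf);
}
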
@ -168,9 +185,13 @@ rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata(
const rd_kafka_assignor_t *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t *owned_partitions) {
return rd_kafka_consumer_protocol_member_metadata_new(topics, NULL, 0,
owned_partitions);
const rd_kafka_topic_partition_list_t *owned_partitions,
const rd_kafkap_str_t *rack_id) {
/* Generation was earlier populated inside userData, and older versions
* of clients still expect that. So, in case the userData is empty, we
* set the explicit generation field to the default value, -1 */
return rd_kafka_consumer_protocol_member_metadata_new(
topics, NULL, 0, owned_partitions, -1 /* generation */, rack_id);
}


@ -242,6 +263,8 @@ rd_kafka_member_subscriptions_map(rd_kafka_cgrp_t *rkcg,
int member_cnt) {
int ti;
rd_kafka_assignor_topic_t *eligible_topic = NULL;
rd_kafka_metadata_internal_t *mdi =
rd_kafka_metadata_get_internal(metadata);

rd_list_init(eligible_topics, RD_MIN(metadata->topic_cnt, 10),
(void *)rd_kafka_assignor_topic_destroy);
@ -284,6 +307,7 @@ rd_kafka_member_subscriptions_map(rd_kafka_cgrp_t *rkcg,
}

eligible_topic->metadata = &metadata->topics[ti];
eligible_topic->metadata_internal = &mdi->topics[ti];
rd_list_add(eligible_topics, eligible_topic);
eligible_topic = NULL;
}
@ -483,7 +507,8 @@ rd_kafka_resp_err_t rd_kafka_assignor_add(
const struct rd_kafka_assignor_s *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t *owned_partitions),
const rd_kafka_topic_partition_list_t *owned_partitions,
const rd_kafkap_str_t *rack_id),
void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas,
void **assignor_state,
const rd_kafka_topic_partition_list_t *assignment,
@ -634,6 +659,676 @@ void rd_kafka_assignors_term(rd_kafka_t *rk) {
rd_list_destroy(&rk->rk_conf.partition_assignors);
}

/**
* @brief Computes whether rack-aware assignment needs to be used, or not.
*/
rd_bool_t
rd_kafka_use_rack_aware_assignment(rd_kafka_assignor_topic_t **topics,
size_t topic_cnt,
const rd_kafka_metadata_internal_t *mdi) {
/* Computing needs_rack_aware_assignment requires the evaluation of
three criteria:

1. At least one of the members has a non-null rack.
2. At least one common rack exists between members and partitions.
3. There is a partition which doesn't have replicas on all possible
racks, or in other words, all partitions don't have replicas on all
racks. Note that 'all racks' here means racks across all replicas of
all partitions, not including consumer racks. Also note that 'all
racks' are computed per-topic for range assignor, and across topics
for sticky assignor.
*/

int i;
size_t t;
rd_kafka_group_member_t *member;
rd_list_t *all_consumer_racks = NULL; /* Contained Type: char* */
rd_list_t *all_partition_racks = NULL; /* Contained Type: char* */
char *rack_id = NULL;
rd_bool_t needs_rack_aware_assignment = rd_true; /* assume true */

/* Criteria 1 */
/* We don't copy racks, so the free function is NULL. */
all_consumer_racks = rd_list_new(0, NULL);

for (t = 0; t < topic_cnt; t++) {
RD_LIST_FOREACH(member, &topics[t]->members, i) {
if (member->rkgm_rack_id &&
RD_KAFKAP_STR_LEN(member->rkgm_rack_id)) {
/* Repetitions are fine, we will dedup it later.
*/
rd_list_add(
all_consumer_racks,
/* The const qualifier has to be discarded
because of how rd_list_t and
rd_kafkap_str_t are, but we never modify
items in all_consumer_racks. */
(char *)member->rkgm_rack_id->str);
}
}
}
if (rd_list_cnt(all_consumer_racks) == 0) {
needs_rack_aware_assignment = rd_false;
goto done;
}


/* Criteria 2 */
/* We don't copy racks, so the free function is NULL. */
all_partition_racks = rd_list_new(0, NULL);

for (t = 0; t < topic_cnt; t++) {
const int partition_cnt = topics[t]->metadata->partition_cnt;
for (i = 0; i < partition_cnt; i++) {
size_t j;
for (j = 0; j < topics[t]
->metadata_internal->partitions[i]
.racks_cnt;
j++) {
char *rack =
topics[t]
->metadata_internal->partitions[i]
.racks[j];
rd_list_add(all_partition_racks, rack);
}
}
}

/* If there are no partition racks, Criteria 2 cannot possibly be met.
*/
if (rd_list_cnt(all_partition_racks) == 0) {
needs_rack_aware_assignment = rd_false;
goto done;
}

/* Sort and dedup the racks. */
rd_list_deduplicate(&all_consumer_racks, rd_strcmp2);
rd_list_deduplicate(&all_partition_racks, rd_strcmp2);


/* Iterate through each list in order, and see if there's anything in
* common */
RD_LIST_FOREACH(rack_id, all_consumer_racks, i) {
/* Break if there's even a single match. */
if (rd_list_find(all_partition_racks, rack_id, rd_strcmp2)) {
break;
}
}
if (i == rd_list_cnt(all_consumer_racks)) {
needs_rack_aware_assignment = rd_false;
goto done;
}

/* Criteria 3 */
for (t = 0; t < topic_cnt; t++) {
const int partition_cnt = topics[t]->metadata->partition_cnt;
for (i = 0; i < partition_cnt; i++) {
/* Since partition_racks[i] is a subset of
* all_partition_racks, and both of them are deduped,
* the same size indicates that they're equal. */
if ((size_t)(rd_list_cnt(all_partition_racks)) !=
topics[t]
->metadata_internal->partitions[i]
.racks_cnt) {
break;
}
}
if (i < partition_cnt) {
/* Break outer loop if inner loop was broken. */
break;
}
}

/* Implies that all partitions have replicas on all racks. */
if (t == topic_cnt)
needs_rack_aware_assignment = rd_false;

done:
RD_IF_FREE(all_consumer_racks, rd_list_destroy);
RD_IF_FREE(all_partition_racks, rd_list_destroy);

return needs_rack_aware_assignment;
}
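A compact, self-contained model of the same three criteria over plain string arrays may make the logic easier to follow. This sketch is illustrative only and uses none of the librdkafka types:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Toy model of rd_kafka_use_rack_aware_assignment(): consumer_racks are
 * the members' racks (NULL = no rack); partition_racks[p] lists the
 * deduplicated replica racks of partition p; total_racks is the number
 * of distinct racks across all replicas of all partitions. */
static bool needs_rack_aware(const char *consumer_racks[], size_t consumers,
                             const char **partition_racks[],
                             const size_t partition_rack_cnt[],
                             size_t partitions, size_t total_racks) {
        size_t c, p, r;
        bool any_consumer_rack = false, common = false;

        /* Criterion 1: at least one consumer has a rack. */
        for (c = 0; c < consumers; c++)
                if (consumer_racks[c] && *consumer_racks[c])
                        any_consumer_rack = true;
        if (!any_consumer_rack)
                return false;

        /* Criterion 2: some consumer rack also hosts a replica. */
        for (c = 0; c < consumers && !common; c++)
                for (p = 0; p < partitions && !common; p++)
                        for (r = 0; r < partition_rack_cnt[p]; r++)
                                if (consumer_racks[c] &&
                                    !strcmp(consumer_racks[c],
                                            partition_racks[p][r])) {
                                        common = true;
                                        break;
                                }
        if (!common)
                return false;

        /* Criterion 3: some partition lacks replicas on every rack. */
        for (p = 0; p < partitions; p++)
                if (partition_rack_cnt[p] < total_racks)
                        return true;

        return false; /* All partitions replicated on all racks. */
}
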

/* Helper to populate the racks for brokers in the metadata for unit tests.
* Passing num_broker_racks = 0 will return NULL racks. */
void ut_populate_internal_broker_metadata(rd_kafka_metadata_internal_t *mdi,
int num_broker_racks,
rd_kafkap_str_t *all_racks[],
size_t all_racks_cnt) {
int i;

rd_assert(num_broker_racks < (int)all_racks_cnt);

for (i = 0; i < mdi->metadata.broker_cnt; i++) {
mdi->brokers[i].id = i;
/* Cast from const to non-const. We don't intend to modify it,
* but unfortunately neither implementation of rd_kafkap_str_t
* or rd_kafka_metadata_broker_internal_t can be changed. So,
* this cast is used - in unit tests only. */
mdi->brokers[i].rack_id =
(char *)(num_broker_racks
? all_racks[i % num_broker_racks]->str
: NULL);
}
}

/* Helper to populate the deduplicated racks inside each partition. It's assumed
* that `mdi->brokers` is set, maybe using
* `ut_populate_internal_broker_metadata`. */
void ut_populate_internal_topic_metadata(rd_kafka_metadata_internal_t *mdi) {
int ti;
rd_kafka_metadata_broker_internal_t *brokers_internal;
size_t broker_cnt;

rd_assert(mdi->brokers);

brokers_internal = mdi->brokers;
broker_cnt = mdi->metadata.broker_cnt;

for (ti = 0; ti < mdi->metadata.topic_cnt; ti++) {
int i;
rd_kafka_metadata_topic_t *mdt = &mdi->metadata.topics[ti];
rd_kafka_metadata_topic_internal_t *mdti = &mdi->topics[ti];

for (i = 0; i < mdt->partition_cnt; i++) {
int j;
rd_kafka_metadata_partition_t *partition =
&mdt->partitions[i];
rd_kafka_metadata_partition_internal_t
*partition_internal = &mdti->partitions[i];

rd_list_t *curr_list;
char *rack;

if (partition->replica_cnt == 0)
continue;

curr_list = rd_list_new(
0, NULL); /* use a list for de-duplication */
for (j = 0; j < partition->replica_cnt; j++) {
rd_kafka_metadata_broker_internal_t key = {
.id = partition->replicas[j]};
rd_kafka_metadata_broker_internal_t *broker =
bsearch(
&key, brokers_internal, broker_cnt,
sizeof(
rd_kafka_metadata_broker_internal_t),
rd_kafka_metadata_broker_internal_cmp);
if (!broker || !broker->rack_id)
continue;
rd_list_add(curr_list, broker->rack_id);
}
rd_list_deduplicate(&curr_list, rd_strcmp2);

partition_internal->racks_cnt = rd_list_cnt(curr_list);
partition_internal->racks = rd_malloc(
sizeof(char *) * partition_internal->racks_cnt);
RD_LIST_FOREACH(rack, curr_list, j) {
partition_internal->racks[j] =
rack; /* no duplication */
}
rd_list_destroy(curr_list);
}
}
}

/* Helper to destroy test metadata. Destroying the metadata has some additional
* steps in case of tests. */
void ut_destroy_metadata(rd_kafka_metadata_t *md) {
int ti;
rd_kafka_metadata_internal_t *mdi = rd_kafka_metadata_get_internal(md);

for (ti = 0; ti < md->topic_cnt; ti++) {
int i;
rd_kafka_metadata_topic_t *mdt = &md->topics[ti];
rd_kafka_metadata_topic_internal_t *mdti = &mdi->topics[ti];

for (i = 0; mdti && i < mdt->partition_cnt; i++) {
rd_free(mdti->partitions[i].racks);
}
}

rd_kafka_metadata_destroy(md);
}


/**
* @brief Set a member's owned partitions based on its assignment.
*
* For use between assignor_run() calls. This is mimicking a consumer receiving
* its new assignment and including it in the next rebalance as its
* owned-partitions.
*/
void ut_set_owned(rd_kafka_group_member_t *rkgm) {
if (rkgm->rkgm_owned)
rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned);

rkgm->rkgm_owned =
rd_kafka_topic_partition_list_copy(rkgm->rkgm_assignment);
}


void ut_print_toppar_list(const rd_kafka_topic_partition_list_t *partitions) {
int i;

for (i = 0; i < partitions->cnt; i++)
RD_UT_SAY(" %s [%" PRId32 "]", partitions->elems[i].topic,
partitions->elems[i].partition);
}


/* Implementation for ut_init_member and ut_init_member_with_rackv. */
static void ut_init_member_internal(rd_kafka_group_member_t *rkgm,
const char *member_id,
const rd_kafkap_str_t *rack_id,
va_list ap) {
const char *topic;

memset(rkgm, 0, sizeof(*rkgm));

rkgm->rkgm_member_id = rd_kafkap_str_new(member_id, -1);
rkgm->rkgm_group_instance_id = rd_kafkap_str_new(member_id, -1);
rkgm->rkgm_rack_id = rack_id ? rd_kafkap_str_copy(rack_id) : NULL;

rd_list_init(&rkgm->rkgm_eligible, 0, NULL);

rkgm->rkgm_subscription = rd_kafka_topic_partition_list_new(4);

while ((topic = va_arg(ap, const char *)))
rd_kafka_topic_partition_list_add(rkgm->rkgm_subscription,
topic, RD_KAFKA_PARTITION_UA);

rkgm->rkgm_assignment =
rd_kafka_topic_partition_list_new(rkgm->rkgm_subscription->size);

rkgm->rkgm_generation = 1;
}

/**
* @brief Initialize group member struct for testing.
*
* va-args is a NULL-terminated list of (const char *) topics.
*
* Use rd_kafka_group_member_clear() to free fields.
*/
void ut_init_member(rd_kafka_group_member_t *rkgm, const char *member_id, ...) {
va_list ap;
va_start(ap, member_id);
ut_init_member_internal(rkgm, member_id, NULL, ap);
va_end(ap);
}

/**
* @brief Initialize group member struct for testing with a rackid.
*
* va-args is a NULL-terminated list of (const char *) topics.
*
* Use rd_kafka_group_member_clear() to free fields.
*/
void ut_init_member_with_rackv(rd_kafka_group_member_t *rkgm,
const char *member_id,
const rd_kafkap_str_t *rack_id,
...) {
va_list ap;
va_start(ap, rack_id);
ut_init_member_internal(rkgm, member_id, rack_id, ap);
va_end(ap);
}

/**
* @brief Initialize group member struct for testing with a rackid.
*
* Topics that the member is subscribed to are specified in an array with the
* size specified separately.
*
* Use rd_kafka_group_member_clear() to free fields.
*/
void ut_init_member_with_rack(rd_kafka_group_member_t *rkgm,
const char *member_id,
const rd_kafkap_str_t *rack_id,
char *topics[],
size_t topic_cnt) {
size_t i;

memset(rkgm, 0, sizeof(*rkgm));

rkgm->rkgm_member_id = rd_kafkap_str_new(member_id, -1);
rkgm->rkgm_group_instance_id = rd_kafkap_str_new(member_id, -1);
rkgm->rkgm_rack_id = rack_id ? rd_kafkap_str_copy(rack_id) : NULL;
rd_list_init(&rkgm->rkgm_eligible, 0, NULL);

rkgm->rkgm_subscription = rd_kafka_topic_partition_list_new(4);

for (i = 0; i < topic_cnt; i++) {
rd_kafka_topic_partition_list_add(
rkgm->rkgm_subscription, topics[i], RD_KAFKA_PARTITION_UA);
}
rkgm->rkgm_assignment =
rd_kafka_topic_partition_list_new(rkgm->rkgm_subscription->size);
}

/**
* @brief Verify that member's assignment matches the expected partitions.
*
* The va-list is a NULL-terminated list of (const char *topic, int partition)
* tuples.
*
* @returns 0 on success, else raises a unittest error and returns 1.
*/
int verifyAssignment0(const char *function,
int line,
rd_kafka_group_member_t *rkgm,
...) {
va_list ap;
int cnt = 0;
const char *topic;
int fails = 0;

va_start(ap, rkgm);
while ((topic = va_arg(ap, const char *))) {
int partition = va_arg(ap, int);
cnt++;

if (!rd_kafka_topic_partition_list_find(rkgm->rkgm_assignment,
topic, partition)) {
RD_UT_WARN(
"%s:%d: Expected %s [%d] not found in %s's "
"assignment (%d partition(s))",
function, line, topic, partition,
rkgm->rkgm_member_id->str,
rkgm->rkgm_assignment->cnt);
fails++;
}
}
va_end(ap);

if (cnt != rkgm->rkgm_assignment->cnt) {
RD_UT_WARN(
"%s:%d: "
"Expected %d assigned partition(s) for %s, not %d",
function, line, cnt, rkgm->rkgm_member_id->str,
rkgm->rkgm_assignment->cnt);
fails++;
}

if (fails)
ut_print_toppar_list(rkgm->rkgm_assignment);

RD_UT_ASSERT(!fails, "%s:%d: See previous errors", function, line);

return 0;
}
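Because the tuple list is terminated by a NULL topic pointer, call sites must always pass a trailing NULL. A short usage sketch of the verifyAssignment() wrapper macro (declared in the header below), inside a unit test with placeholder topic names:

/* Expect member 0 to own exactly t1[0] and t2[1]. The trailing NULL
 * terminates the (topic, partition) tuple list; omitting it would make
 * va_arg read past the arguments. */
verifyAssignment(&members[0], "t1", 0, "t2", 1, NULL);

/* Expect an empty assignment: just the terminator. */
verifyAssignment(&members[1], NULL);
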

/**
* @brief Verify that all members' assignment matches the expected partitions.
*
* The va-list is a list of (const char *topic, int partition)
* tuples, and NULL to demarcate different members' assignment.
*
* @returns 0 on success, else raises a unittest error and returns 1.
*/
int verifyMultipleAssignment0(const char *function,
int line,
rd_kafka_group_member_t *rkgms,
size_t member_cnt,
...) {
va_list ap;
const char *topic;
int fails = 0;
size_t i = 0;

if (member_cnt == 0) {
return 0;
}

va_start(ap, member_cnt);
for (i = 0; i < member_cnt; i++) {
rd_kafka_group_member_t *rkgm = &rkgms[i];
int cnt = 0;
int local_fails = 0;

while ((topic = va_arg(ap, const char *))) {
int partition = va_arg(ap, int);
cnt++;

if (!rd_kafka_topic_partition_list_find(
rkgm->rkgm_assignment, topic, partition)) {
RD_UT_WARN(
"%s:%d: Expected %s [%d] not found in %s's "
"assignment (%d partition(s))",
function, line, topic, partition,
rkgm->rkgm_member_id->str,
rkgm->rkgm_assignment->cnt);
local_fails++;
}
}

if (cnt != rkgm->rkgm_assignment->cnt) {
RD_UT_WARN(
"%s:%d: "
"Expected %d assigned partition(s) for %s, not %d",
function, line, cnt, rkgm->rkgm_member_id->str,
rkgm->rkgm_assignment->cnt);
fails++;
}

if (local_fails)
ut_print_toppar_list(rkgm->rkgm_assignment);
fails += local_fails;
}
va_end(ap);

RD_UT_ASSERT(!fails, "%s:%d: See previous errors", function, line);

return 0;
}


#define verifyNumPartitionsWithRackMismatchPartition(rktpar, metadata, \
increase) \
do { \
if (!rktpar) \
break; \
int i; \
rd_bool_t noneMatch = rd_true; \
rd_kafka_metadata_internal_t *metadata_internal = \
rd_kafka_metadata_get_internal(metadata); \
\
for (i = 0; i < metadata->topics[j].partitions[k].replica_cnt; \
i++) { \
int32_t replica_id = \
metadata->topics[j].partitions[k].replicas[i]; \
rd_kafka_metadata_broker_internal_t *broker; \
rd_kafka_metadata_broker_internal_find( \
metadata_internal, replica_id, broker); \
\
if (broker && !strcmp(rack_id, broker->rack_id)) { \
noneMatch = rd_false; \
break; \
} \
} \
\
if (noneMatch) \
increase++; \
} while (0);

/**
* @brief Verify number of partitions with rack mismatch.
*/
int verifyNumPartitionsWithRackMismatch0(const char *function,
int line,
rd_kafka_metadata_t *metadata,
rd_kafka_group_member_t *rkgms,
size_t member_cnt,
int expectedNumMismatch) {
size_t i;
int j, k;

int numMismatched = 0;
for (i = 0; i < member_cnt; i++) {
rd_kafka_group_member_t *rkgm = &rkgms[i];
const char *rack_id = rkgm->rkgm_rack_id->str;
if (rack_id) {
for (j = 0; j < metadata->topic_cnt; j++) {
for (k = 0;
k < metadata->topics[j].partition_cnt;
k++) {
rd_kafka_topic_partition_t *rktpar =
rd_kafka_topic_partition_list_find(
rkgm->rkgm_assignment,
metadata->topics[j].topic, k);
verifyNumPartitionsWithRackMismatchPartition(
rktpar, metadata, numMismatched);
}
}
}
}

RD_UT_ASSERT(expectedNumMismatch == numMismatched,
"%s:%d: Expected %d mismatches, got %d", function, line,
expectedNumMismatch, numMismatched);

return 0;
}


int verifyValidityAndBalance0(const char *func,
int line,
rd_kafka_group_member_t *members,
size_t member_cnt,
const rd_kafka_metadata_t *metadata) {
int fails = 0;
int i;
rd_bool_t verbose = rd_false; /* Enable for troubleshooting */

RD_UT_SAY("%s:%d: verifying assignment for %d member(s):", func, line,
(int)member_cnt);

for (i = 0; i < (int)member_cnt; i++) {
const char *consumer = members[i].rkgm_member_id->str;
const rd_kafka_topic_partition_list_t *partitions =
members[i].rkgm_assignment;
int p, j;

if (verbose)
RD_UT_SAY(
"%s:%d: "
"consumer \"%s\", %d subscribed topic(s), "
"%d assigned partition(s):",
func, line, consumer,
members[i].rkgm_subscription->cnt, partitions->cnt);

for (p = 0; p < partitions->cnt; p++) {
const rd_kafka_topic_partition_t *partition =
&partitions->elems[p];

if (verbose)
RD_UT_SAY("%s:%d: %s [%" PRId32 "]", func,
line, partition->topic,
partition->partition);

if (!rd_kafka_topic_partition_list_find(
members[i].rkgm_subscription, partition->topic,
RD_KAFKA_PARTITION_UA)) {
RD_UT_WARN("%s [%" PRId32
"] is assigned to "
"%s but it is not subscribed to "
"that topic",
partition->topic,
partition->partition, consumer);
fails++;
}
}

/* Update the member's owned partitions to match
* the assignment. */
ut_set_owned(&members[i]);

if (i == (int)member_cnt - 1)
continue;

for (j = i + 1; j < (int)member_cnt; j++) {
const char *otherConsumer =
members[j].rkgm_member_id->str;
const rd_kafka_topic_partition_list_t *otherPartitions =
members[j].rkgm_assignment;
rd_bool_t balanced =
abs(partitions->cnt - otherPartitions->cnt) <= 1;

for (p = 0; p < partitions->cnt; p++) {
const rd_kafka_topic_partition_t *partition =
&partitions->elems[p];

if (rd_kafka_topic_partition_list_find(
otherPartitions, partition->topic,
partition->partition)) {
RD_UT_WARN(
"Consumer %s and %s are both "
"assigned %s [%" PRId32 "]",
consumer, otherConsumer,
partition->topic,
partition->partition);
fails++;
}


/* If assignment is imbalanced and this topic
* is also subscribed by the other consumer
* it means the assignment strategy failed to
* properly balance the partitions. */
if (!balanced &&
rd_kafka_topic_partition_list_find_topic_by_name(
otherPartitions, partition->topic)) {
RD_UT_WARN(
"Some %s partition(s) can be "
"moved from "
"%s (%d partition(s)) to "
"%s (%d partition(s)) to "
"achieve a better balance",
partition->topic, consumer,
partitions->cnt, otherConsumer,
otherPartitions->cnt);
fails++;
}
}
}
}

RD_UT_ASSERT(!fails, "%s:%d: See %d previous errors", func, line,
fails);

return 0;
}

/**
* @brief Checks that all assigned partitions are fully balanced.
*
* Only works for symmetrical subscriptions.
*/
int isFullyBalanced0(const char *function,
int line,
const rd_kafka_group_member_t *members,
size_t member_cnt) {
int min_assignment = INT_MAX;
int max_assignment = -1;
size_t i;

for (i = 0; i < member_cnt; i++) {
int size = members[i].rkgm_assignment->cnt;
if (size < min_assignment)
min_assignment = size;
if (size > max_assignment)
max_assignment = size;
}

RD_UT_ASSERT(max_assignment - min_assignment <= 1,
"%s:%d: Assignment not balanced: min %d, max %d", function,
line, min_assignment, max_assignment);

return 0;
}


/**
@ -879,6 +1574,7 @@ static int ut_assignors(void) {
/* Run through test cases */
for (i = 0; tests[i].name; i++) {
int ie, it, im;
rd_kafka_metadata_internal_t metadata_internal;
rd_kafka_metadata_t metadata;
rd_kafka_group_member_t *members;

@ -886,14 +1582,38 @@ static int ut_assignors(void) {
metadata.topic_cnt = tests[i].topic_cnt;
metadata.topics =
rd_alloca(sizeof(*metadata.topics) * metadata.topic_cnt);
metadata_internal.topics = rd_alloca(
sizeof(*metadata_internal.topics) * metadata.topic_cnt);

memset(metadata.topics, 0,
sizeof(*metadata.topics) * metadata.topic_cnt);
memset(metadata_internal.topics, 0,
sizeof(*metadata_internal.topics) * metadata.topic_cnt);

for (it = 0; it < metadata.topic_cnt; it++) {
int pt;
metadata.topics[it].topic =
(char *)tests[i].topics[it].name;
metadata.topics[it].partition_cnt =
tests[i].topics[it].partition_cnt;
metadata.topics[it].partitions = NULL; /* Not used */
metadata.topics[it].partitions =
rd_alloca(metadata.topics[it].partition_cnt *
sizeof(rd_kafka_metadata_partition_t));
metadata_internal.topics[it].partitions = rd_alloca(
metadata.topics[it].partition_cnt *
sizeof(rd_kafka_metadata_partition_internal_t));
for (pt = 0; pt < metadata.topics[it].partition_cnt;
pt++) {
metadata.topics[it].partitions[pt].id = pt;
metadata.topics[it].partitions[pt].replica_cnt =
0;
metadata_internal.topics[it]
.partitions[pt]
.racks_cnt = 0;
metadata_internal.topics[it]
.partitions[pt]
.racks = NULL;
}
}

/* Create members */
@ -944,9 +1664,12 @@ static int ut_assignors(void) {
}

/* Run assignor */
metadata_internal.metadata = metadata;
err = rd_kafka_assignor_run(
rk->rk_cgrp, rkas, &metadata, members,
tests[i].member_cnt, errstr, sizeof(errstr));
rk->rk_cgrp, rkas,
(rd_kafka_metadata_t *)(&metadata_internal),
members, tests[i].member_cnt, errstr,
sizeof(errstr));

RD_UT_ASSERT(!err, "Assignor case %s for %s failed: %s",
tests[i].name,

@ -1,7 +1,8 @@
/*
* librdkafka - The Apache Kafka C/C++ library
*
* Copyright (c) 2015 Magnus Edenhill
* Copyright (c) 2015-2022, Magnus Edenhill
* 2023 Confluent Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -28,7 +29,7 @@
#ifndef _RDKAFKA_ASSIGNOR_H_
#define _RDKAFKA_ASSIGNOR_H_


#include "rdkafka_metadata.h"

/*!
* Enumerates the different rebalance protocol types.
@ -69,6 +70,8 @@ typedef struct rd_kafka_group_member_s {
rd_kafkap_bytes_t *rkgm_member_metadata;
/** Group generation id. */
int rkgm_generation;
/** Member rack id. */
rd_kafkap_str_t *rkgm_rack_id;
} rd_kafka_group_member_t;


@ -78,13 +81,13 @@ int rd_kafka_group_member_find_subscription(rd_kafka_t *rk,
const rd_kafka_group_member_t *rkgm,
const char *topic);


/**
* Structure to hold metadata for a single topic and all its
* subscribing members.
*/
typedef struct rd_kafka_assignor_topic_s {
const rd_kafka_metadata_topic_t *metadata;
const rd_kafka_metadata_topic_internal_t *metadata_internal;
rd_list_t members; /* rd_kafka_group_member_t * */
} rd_kafka_assignor_topic_t;


@ -120,7 +123,8 @@ typedef struct rd_kafka_assignor_s {
const struct rd_kafka_assignor_s *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t *owned_partitions);
const rd_kafka_topic_partition_list_t *owned_partitions,
const rd_kafkap_str_t *rack_id);

void (*rkas_on_assignment_cb)(
const struct rd_kafka_assignor_s *rkas,
@ -158,7 +162,8 @@ rd_kafka_resp_err_t rd_kafka_assignor_add(
const struct rd_kafka_assignor_s *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t *owned_partitions),
const rd_kafka_topic_partition_list_t *owned_partitions,
const rd_kafkap_str_t *rack_id),
void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas,
void **assignor_state,
const rd_kafka_topic_partition_list_t *assignment,
@ -172,13 +177,16 @@ rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
const rd_list_t *topics,
const void *userdata,
size_t userdata_size,
const rd_kafka_topic_partition_list_t *owned_partitions);
const rd_kafka_topic_partition_list_t *owned_partitions,
int generation,
const rd_kafkap_str_t *rack_id);

rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata(
const rd_kafka_assignor_t *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t *owned_partitions);
const rd_kafka_topic_partition_list_t *owned_partitions,
const rd_kafkap_str_t *rack_id);


void rd_kafka_assignor_update_subscription(
@ -208,5 +216,187 @@ void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm);
rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk);
rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk);
rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk);
rd_bool_t
rd_kafka_use_rack_aware_assignment(rd_kafka_assignor_topic_t **topics,
size_t topic_cnt,
const rd_kafka_metadata_internal_t *mdi);

/**
* @name Common unit test functions, macros, and enums to use across assignors.
*
*
*
*/

/* Tests can be parametrized to contain either only broker racks, only consumer
* racks or both.*/
typedef enum {
RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK = 0,
RD_KAFKA_RANGE_ASSIGNOR_UT_NO_CONSUMER_RACK = 1,
RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK = 2,
RD_KAFKA_RANGE_ASSIGNOR_UT_CONFIG_CNT = 3,
} rd_kafka_assignor_ut_rack_config_t;


void ut_populate_internal_broker_metadata(rd_kafka_metadata_internal_t *mdi,
int num_broker_racks,
rd_kafkap_str_t *all_racks[],
size_t all_racks_cnt);

void ut_populate_internal_topic_metadata(rd_kafka_metadata_internal_t *mdi);

void ut_destroy_metadata(rd_kafka_metadata_t *md);

void ut_set_owned(rd_kafka_group_member_t *rkgm);

void ut_print_toppar_list(const rd_kafka_topic_partition_list_t *partitions);

void ut_init_member(rd_kafka_group_member_t *rkgm, const char *member_id, ...);

void ut_init_member_with_rackv(rd_kafka_group_member_t *rkgm,
const char *member_id,
const rd_kafkap_str_t *rack_id,
...);

void ut_init_member_with_rack(rd_kafka_group_member_t *rkgm,
const char *member_id,
const rd_kafkap_str_t *rack_id,
char *topics[],
size_t topic_cnt);

int verifyAssignment0(const char *function,
int line,
rd_kafka_group_member_t *rkgm,
...);

int verifyMultipleAssignment0(const char *function,
int line,
rd_kafka_group_member_t *rkgms,
size_t member_cnt,
...);

int verifyNumPartitionsWithRackMismatch0(const char *function,
int line,
rd_kafka_metadata_t *metadata,
rd_kafka_group_member_t *rkgms,
size_t member_cnt,
int expectedNumMismatch);

#define verifyAssignment(rkgm, ...) \
do { \
if (verifyAssignment0(__FUNCTION__, __LINE__, rkgm, \
__VA_ARGS__)) \
return 1; \
} while (0)

#define verifyMultipleAssignment(rkgms, member_cnt, ...) \
do { \
if (verifyMultipleAssignment0(__FUNCTION__, __LINE__, rkgms, \
member_cnt, __VA_ARGS__)) \
return 1; \
} while (0)

#define verifyNumPartitionsWithRackMismatch(metadata, rkgms, member_cnt, \
expectedNumMismatch) \
do { \
if (verifyNumPartitionsWithRackMismatch0( \
__FUNCTION__, __LINE__, metadata, rkgms, member_cnt, \
expectedNumMismatch)) \
return 1; \
} while (0)

int verifyValidityAndBalance0(const char *func,
int line,
rd_kafka_group_member_t *members,
size_t member_cnt,
const rd_kafka_metadata_t *metadata);

#define verifyValidityAndBalance(members, member_cnt, metadata) \
do { \
if (verifyValidityAndBalance0(__FUNCTION__, __LINE__, members, \
member_cnt, metadata)) \
return 1; \
} while (0)

int isFullyBalanced0(const char *function,
int line,
const rd_kafka_group_member_t *members,
size_t member_cnt);

#define isFullyBalanced(members, member_cnt) \
do { \
if (isFullyBalanced0(__FUNCTION__, __LINE__, members, \
member_cnt)) \
return 1; \
} while (0)

/* Helper macro to initialize a consumer with or without a rack depending on the
* value of parametrization. */
#define ut_initMemberConditionalRack(member_ptr, member_id, rack, \
parametrization, ...) \
do { \
if (parametrization == \
RD_KAFKA_RANGE_ASSIGNOR_UT_NO_CONSUMER_RACK) { \
ut_init_member(member_ptr, member_id, __VA_ARGS__); \
} else { \
ut_init_member_with_rackv(member_ptr, member_id, rack, \
__VA_ARGS__); \
} \
} while (0)

/* Helper macro to initialize rd_kafka_metadata_t* with or without replicas
* depending on the value of parametrization. This accepts variadic arguments
* for topics. */
#define ut_initMetadataConditionalRack(metadataPtr, replication_factor, \
num_broker_racks, all_racks, \
all_racks_cnt, parametrization, ...) \
do { \
int num_brokers = num_broker_racks > 0 \
? replication_factor * num_broker_racks \
: replication_factor; \
if (parametrization == \
RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) { \
*(metadataPtr) = \
rd_kafka_metadata_new_topic_mockv(__VA_ARGS__); \
} else { \
*(metadataPtr) = \
rd_kafka_metadata_new_topic_with_partition_replicas_mockv( \
replication_factor, num_brokers, __VA_ARGS__); \
ut_populate_internal_broker_metadata( \
rd_kafka_metadata_get_internal(*(metadataPtr)), \
num_broker_racks, all_racks, all_racks_cnt); \
ut_populate_internal_topic_metadata( \
rd_kafka_metadata_get_internal(*(metadataPtr))); \
} \
} while (0)


/* Helper macro to initialize rd_kafka_metadata_t* with or without replicas
* depending on the value of parametrization. This accepts a list of topics,
* rather than being variadic.
*/
#define ut_initMetadataConditionalRack0( \
metadataPtr, replication_factor, num_broker_racks, all_racks, \
all_racks_cnt, parametrization, topics, topic_cnt) \
do { \
int num_brokers = num_broker_racks > 0 \
? replication_factor * num_broker_racks \
: replication_factor; \
if (parametrization == \
RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) { \
*(metadataPtr) = rd_kafka_metadata_new_topic_mock( \
topics, topic_cnt, -1, 0); \
} else { \
*(metadataPtr) = rd_kafka_metadata_new_topic_mock( \
topics, topic_cnt, replication_factor, \
num_brokers); \
ut_populate_internal_broker_metadata( \
rd_kafka_metadata_get_internal(*(metadataPtr)), \
num_broker_racks, all_racks, all_racks_cnt); \
ut_populate_internal_topic_metadata( \
rd_kafka_metadata_get_internal(*(metadataPtr))); \
} \
} while (0)


#endif /* _RDKAFKA_ASSIGNOR_H_ */

@ -1,7 +1,8 @@
/*
* librdkafka - Apache Kafka C library
*
* Copyright (c) 2018 Magnus Edenhill
* Copyright (c) 2018-2022, Magnus Edenhill
* 2023 Confluent Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -234,19 +235,60 @@ void rd_kafka_acl_result_free(void *ptr) {
* @return A new allocated Node object.
* Use rd_kafka_Node_destroy() to free when done.
*/
rd_kafka_Node_t *rd_kafka_Node_new(int id,
rd_kafka_Node_t *rd_kafka_Node_new(int32_t id,
const char *host,
uint16_t port,
const char *rack_id) {
const char *rack) {
rd_kafka_Node_t *ret = rd_calloc(1, sizeof(*ret));
ret->id = id;
ret->port = port;
ret->host = rd_strdup(host);
if (rack_id != NULL)
ret->rack_id = rd_strdup(rack_id);
if (rack != NULL)
ret->rack = rd_strdup(rack);
return ret;
}

/**
* @brief Create a new Node object given a node id, and use broker information
* to populate other fields.
*
* @return A new allocated Node object.
* Use rd_kafka_Node_destroy() to free when done.
* @remark The \p brokers_sorted and \p brokers_internal arrays are assumed to be
* sorted by id.
*/
rd_kafka_Node_t *rd_kafka_Node_new_from_brokers(
int32_t id,
const struct rd_kafka_metadata_broker *brokers_sorted,
const rd_kafka_metadata_broker_internal_t *brokers_internal,
int broker_cnt) {
rd_kafka_Node_t *node = rd_calloc(1, sizeof(*node));
struct rd_kafka_metadata_broker key_sorted = {.id = id};
rd_kafka_metadata_broker_internal_t key_internal = {.id = id};

struct rd_kafka_metadata_broker *broker =
bsearch(&key_sorted, brokers_sorted, broker_cnt,
sizeof(struct rd_kafka_metadata_broker),
rd_kafka_metadata_broker_cmp);

rd_kafka_metadata_broker_internal_t *broker_internal =
bsearch(&key_internal, brokers_internal, broker_cnt,
sizeof(rd_kafka_metadata_broker_internal_t),
rd_kafka_metadata_broker_internal_cmp);

node->id = id;

if (!broker)
return node;

node->host = rd_strdup(broker->host);
node->port = broker->port;
if (broker_internal && broker_internal->rack_id)
node->rack = rd_strdup(broker_internal->rack_id);

return node;
}

/**
* @brief Copy \p src Node object
*
@ -255,16 +297,26 @@ rd_kafka_Node_t *rd_kafka_Node_new(int id,
* Use rd_kafka_Node_destroy() to free when done.
*/
rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src) {
return rd_kafka_Node_new(src->id, src->host, src->port, src->rack_id);
return rd_kafka_Node_new(src->id, src->host, src->port, src->rack);
}

void rd_kafka_Node_destroy(rd_kafka_Node_t *node) {
rd_free(node->host);
if (node->rack_id)
rd_free(node->rack_id);
if (node->rack)
rd_free(node->rack);
rd_free(node);
}

/**
* @brief Same as rd_kafka_Node_destroy, but for use as callback which accepts
* (void *) arguments.
*
* @param node
*/
void rd_kafka_Node_free(void *node) {
rd_kafka_Node_destroy((rd_kafka_Node_t *)node);
}

int rd_kafka_Node_id(const rd_kafka_Node_t *node) {
return node->id;
}
@ -276,3 +328,82 @@ const char *rd_kafka_Node_host(const rd_kafka_Node_t *node) {
uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node) {
return node->port;
}

const char *rd_kafka_Node_rack(const rd_kafka_Node_t *node) {
return node->rack;
}
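These getters are part of the public admin API, so application code can format a node without touching the struct. A minimal sketch (the node pointer would come from a DescribeCluster or DescribeTopics result):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Print "broker <id> at <host>:<port> [rack <rack>]" for a node from an
 * admin API result. rd_kafka_Node_rack() may return NULL when the
 * broker has no broker.rack configured. */
static void print_node(const rd_kafka_Node_t *node) {
        const char *rack = rd_kafka_Node_rack(node);
        printf("broker %d at %s:%u%s%s\n", rd_kafka_Node_id(node),
               rd_kafka_Node_host(node),
               (unsigned)rd_kafka_Node_port(node),
               rack ? " rack " : "", rack ? rack : "");
}
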

/**
* @brief Creates a new rd_kafka_topic_partition_result_t object.
*/

rd_kafka_topic_partition_result_t *
rd_kafka_topic_partition_result_new(const char *topic,
int32_t partition,
rd_kafka_resp_err_t err,
const char *errstr) {

rd_kafka_topic_partition_result_t *new_result;

new_result = rd_calloc(1, sizeof(*new_result));
new_result->topic_partition =
rd_kafka_topic_partition_new(topic, partition);
new_result->topic_partition->err = err;
new_result->error = rd_kafka_error_new(err, "%s", errstr);

return new_result;
}

const rd_kafka_topic_partition_t *rd_kafka_topic_partition_result_partition(
const rd_kafka_topic_partition_result_t *partition_result) {
return partition_result->topic_partition;
}

const rd_kafka_error_t *rd_kafka_topic_partition_result_error(
const rd_kafka_topic_partition_result_t *partition_result) {
return partition_result->error;
}

/**
* @brief Destroys the rd_kafka_topic_partition_result_t object.
*/
void rd_kafka_topic_partition_result_destroy(
rd_kafka_topic_partition_result_t *partition_result) {
rd_kafka_topic_partition_destroy(partition_result->topic_partition);
rd_kafka_error_destroy(partition_result->error);
rd_free(partition_result);
}

/**
* @brief Destroys the array of rd_kafka_topic_partition_result_t objects.
*/
void rd_kafka_topic_partition_result_destroy_array(
rd_kafka_topic_partition_result_t **partition_results,
int32_t partition_results_cnt) {
int32_t i;
for (i = 0; i < partition_results_cnt; i++) {
rd_kafka_topic_partition_result_destroy(partition_results[i]);
}
}

rd_kafka_topic_partition_result_t *rd_kafka_topic_partition_result_copy(
const rd_kafka_topic_partition_result_t *src) {
return rd_kafka_topic_partition_result_new(
src->topic_partition->topic, src->topic_partition->partition,
src->topic_partition->err, src->error->errstr);
}

void *rd_kafka_topic_partition_result_copy_opaque(const void *src,
void *opaque) {
return rd_kafka_topic_partition_result_copy(
(const rd_kafka_topic_partition_result_t *)src);
}

/**
* @brief Frees the memory allocated for a
* topic partition result object by calling
* its destroy function.
*/
void rd_kafka_topic_partition_result_free(void *ptr) {
rd_kafka_topic_partition_result_destroy(ptr);
}
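A short sketch of the lifecycle of these result objects, using only the functions defined above (the topic name and error text are placeholders; assumes <stdio.h>):

/* Build, inspect and free one per-partition result, e.g. as the
 * ElectLeaders result list would hold it. */
static void topic_partition_result_example(void) {
        rd_kafka_topic_partition_result_t *res =
            rd_kafka_topic_partition_result_new(
                "mytopic", 0, RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
                "Preferred leader election in progress");

        const rd_kafka_topic_partition_t *tp =
            rd_kafka_topic_partition_result_partition(res);
        const rd_kafka_error_t *err =
            rd_kafka_topic_partition_result_error(res);

        printf("%s [%d]: %s\n", tp->topic, (int)tp->partition,
               rd_kafka_error_string(err));

        rd_kafka_topic_partition_result_destroy(res);
}
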
@ -1,7 +1,8 @@
/*
* librdkafka - Apache Kafka C library
*
* Copyright (c) 2018 Magnus Edenhill
* Copyright (c) 2018-2022, Magnus Edenhill
* 2023 Confluent Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -107,14 +108,67 @@ typedef struct rd_kafka_Node_s {
int id; /*< Node id */
char *host; /*< Node host */
uint16_t port; /*< Node port */
char *rack_id; /*< (optional) Node rack id */
char *rack; /*< (optional) Node rack id */
} rd_kafka_Node_t;

rd_kafka_Node_t *
rd_kafka_Node_new(int id, const char *host, uint16_t port, const char *rack_id);
rd_kafka_Node_t *rd_kafka_Node_new(int32_t id,
const char *host,
uint16_t port,
const char *rack_id);

rd_kafka_Node_t *rd_kafka_Node_new_from_brokers(
int32_t id,
const struct rd_kafka_metadata_broker *brokers_sorted,
const rd_kafka_metadata_broker_internal_t *brokers_internal,
int broker_cnt);

rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src);

void rd_kafka_Node_destroy(rd_kafka_Node_t *node);

void rd_kafka_Node_free(void *node);

/**
* @brief Represents a topic partition result.
*
* @remark Public Type
*/
struct rd_kafka_topic_partition_result_s {
rd_kafka_topic_partition_t *topic_partition;
rd_kafka_error_t *error;
};

/**
* @brief Create a new rd_kafka_topic_partition_result_t object.
*
* @param topic The topic name.
* @param partition The partition number.
* @param err The error code.
* @param errstr The error string.
*
* @returns a newly allocated rd_kafka_topic_partition_result_t object.
* Use rd_kafka_topic_partition_result_destroy() to free object when
* done.
*/
rd_kafka_topic_partition_result_t *
rd_kafka_topic_partition_result_new(const char *topic,
int32_t partition,
rd_kafka_resp_err_t err,
const char *errstr);

rd_kafka_topic_partition_result_t *rd_kafka_topic_partition_result_copy(
const rd_kafka_topic_partition_result_t *src);

void *rd_kafka_topic_partition_result_copy_opaque(const void *src,
void *opaque);

void rd_kafka_topic_partition_result_destroy(
rd_kafka_topic_partition_result_t *partition_result);

void rd_kafka_topic_partition_result_destroy_array(
rd_kafka_topic_partition_result_t **partition_results,
int32_t partition_results_cnt);

void rd_kafka_topic_partition_result_free(void *ptr);

#endif /* _RDKAFKA_AUX_H_ */

@ -1,7 +1,7 @@
/*
* librdkafka - Apache Kafka C library
*
* Copyright (c) 2018 Magnus Edenhill
* Copyright (c) 2018-2022, Magnus Edenhill
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

File diff suppressed because it is too large

@ -1,7 +1,8 @@
/*
* librdkafka - Apache Kafka C library
*
* Copyright (c) 2012,2013 Magnus Edenhill
* Copyright (c) 2012-2022, Magnus Edenhill
* 2023 Confluent Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -50,10 +51,10 @@ typedef enum {
/* Any state >= STATE_UP means the Kafka protocol layer
* is operational (to some degree). */
RD_KAFKA_BROKER_STATE_UP,
RD_KAFKA_BROKER_STATE_UPDATE,
RD_KAFKA_BROKER_STATE_APIVERSION_QUERY,
RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE,
RD_KAFKA_BROKER_STATE_AUTH_REQ,
RD_KAFKA_BROKER_STATE_REAUTH,
} rd_kafka_broker_state_t;

/**
@ -80,8 +81,7 @@ typedef struct rd_kafka_broker_monitor_s {
struct rd_kafka_broker_s { /* rd_kafka_broker_t */
TAILQ_ENTRY(rd_kafka_broker_s) rkb_link;

int32_t rkb_nodeid; /**< Broker Node Id.
* @locks rkb_lock */
int32_t rkb_nodeid; /**< Broker Node Id, read only. */
#define RD_KAFKA_NODEID_UA -1

rd_sockaddr_list_t *rkb_rsal;
@ -191,6 +191,40 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
rd_atomic64_t ts_recv; /**< Timestamp of last receive */
} rkb_c;

struct {
struct {
int32_t connects; /**< Connection attempts,
* successful or not. */
} rkb_historic_c;

struct {
rd_avg_t rkb_avg_rtt; /* Current RTT avg */
rd_avg_t rkb_avg_throttle; /* Current throttle avg */
rd_avg_t
rkb_avg_outbuf_latency; /**< Current latency
* between buf_enq0
* and writing to socket
*/
rd_avg_t rkb_avg_fetch_latency; /**< Current fetch
* latency avg */
rd_avg_t rkb_avg_produce_latency; /**< Current produce
* latency avg */
} rd_avg_current;

struct {
rd_avg_t rkb_avg_rtt; /**< Rolled over RTT avg */
rd_avg_t
rkb_avg_throttle; /**< Rolled over throttle avg */
rd_avg_t rkb_avg_outbuf_latency; /**< Rolled over outbuf
* latency avg */
rd_avg_t rkb_avg_fetch_latency; /**< Rolled over fetch
* latency avg */
rd_avg_t
rkb_avg_produce_latency; /**< Rolled over produce
* latency avg */
} rd_avg_rollover;
} rkb_telemetry;

int rkb_req_timeouts; /* Current value */

thrd_t rkb_thread;
|
@ -252,6 +286,9 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
|
|||
/** Absolute time of last connection attempt. */
|
||||
rd_ts_t rkb_ts_connect;
|
||||
|
||||
/** True if a reauthentication is in progress. */
|
||||
rd_bool_t rkb_reauth_in_progress;
|
||||
|
||||
/**< Persistent connection demand is tracked by
|
||||
* a counter for each type of demand.
|
||||
* The broker thread will maintain a persistent connection
|
||||
|
|
@ -323,6 +360,12 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
|
|||
rd_kafka_resp_err_t err; /**< Last error code */
|
||||
int cnt; /**< Number of identical errors */
|
||||
} rkb_last_err;
|
||||
|
||||
|
||||
rd_kafka_timer_t rkb_sasl_reauth_tmr;
|
||||
|
||||
/** > 0 if this broker thread is terminating */
|
||||
rd_atomic32_t termination_in_progress;
|
||||
};
|
||||
|
||||
#define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt)
|
||||
|
|
@ -350,12 +393,28 @@ rd_kafka_broker_get_state(rd_kafka_broker_t *rkb) {
|
|||
|
||||
|
||||
/**
|
||||
* @returns true if the broker state is UP or UPDATE
|
||||
* @returns true if the broker state is UP
|
||||
*/
|
||||
#define rd_kafka_broker_state_is_up(state) \
|
||||
((state) == RD_KAFKA_BROKER_STATE_UP || \
|
||||
(state) == RD_KAFKA_BROKER_STATE_UPDATE)
|
||||
#define rd_kafka_broker_state_is_up(state) ((state) == RD_KAFKA_BROKER_STATE_UP)
|
||||
|
||||
/**
|
||||
* @returns true if the broker state is DOWN
|
||||
*/
|
||||
#define rd_kafka_broker_state_is_down(state) \
|
||||
((state) == RD_KAFKA_BROKER_STATE_DOWN)
|
||||
|
||||
/**
|
||||
* @returns true if the error is a broker destroy error, because of
|
||||
* termination or because of decommissioning.
|
||||
*/
|
||||
#define rd_kafka_broker_is_any_err_destroy(err) \
|
||||
((err) == RD_KAFKA_RESP_ERR__DESTROY || \
|
||||
(err) == RD_KAFKA_RESP_ERR__DESTROY_BROKER)
|
||||
|
||||
|
||||
#define rd_kafka_broker_or_instance_terminating(rkb) \
|
||||
(rd_kafka_broker_termination_in_progress(rkb) || \
|
||||
rd_kafka_terminating((rkb)->rkb_rk))
|
||||
|
||||
/**
|
||||
* @returns true if the broker connection is up, else false.
|
||||
|
|
@ -368,6 +427,14 @@ rd_kafka_broker_is_up(rd_kafka_broker_t *rkb) {
|
|||
return rd_kafka_broker_state_is_up(state);
|
||||
}
|
||||
|
||||
/**
|
||||
* @returns true if the broker needs a persistent connection
|
||||
* @locality any
|
||||
*/
|
||||
static RD_UNUSED RD_INLINE rd_bool_t
|
||||
rd_kafka_broker_termination_in_progress(rd_kafka_broker_t *rkb) {
|
||||
return rd_atomic32_get(&rkb->termination_in_progress) > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Broker comparator
|
||||
|
|
@ -403,6 +470,13 @@ int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb,
|
|||
int16_t maxver,
|
||||
int *featuresp);
|
||||
|
||||
int16_t rd_kafka_broker_ApiVersion_supported0(rd_kafka_broker_t *rkb,
|
||||
int16_t ApiKey,
|
||||
int16_t minver,
|
||||
int16_t maxver,
|
||||
int *featuresp,
|
||||
rd_bool_t do_lock);
|
||||
|
||||
rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func,
|
||||
int line,
|
||||
rd_kafka_t *rk,
|
||||
|
|
@ -461,7 +535,9 @@ rd_kafka_broker_t *rd_kafka_broker_controller_async(rd_kafka_t *rk,
|
|||
int state,
|
||||
rd_kafka_enq_once_t *eonce);
|
||||
|
||||
int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist);
|
||||
int rd_kafka_brokers_add0(rd_kafka_t *rk,
|
||||
const char *brokerlist,
|
||||
rd_bool_t is_bootstrap_server_list);
|
||||
void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state);
|
||||
|
||||
void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
|
||||
|
|
@ -507,9 +583,13 @@ void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr);
|
|||
int rd_kafka_send(rd_kafka_broker_t *rkb);
|
||||
int rd_kafka_recv(rd_kafka_broker_t *rkb);
|
||||
|
||||
void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt,
|
||||
#define rd_kafka_dr_msgq(rkt, rkmq, err) \
|
||||
rd_kafka_dr_msgq0(rkt, rkmq, err, NULL /*no produce result*/)
|
||||
|
||||
void rd_kafka_dr_msgq0(rd_kafka_topic_t *rkt,
|
||||
rd_kafka_msgq_t *rkmq,
|
||||
rd_kafka_resp_err_t err);
|
||||
rd_kafka_resp_err_t err,
|
||||
const rd_kafka_Produce_result_t *presult);
|
||||
|
||||
void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb,
|
||||
rd_kafka_toppar_t *rktp,
|
||||
|
|
@ -558,6 +638,25 @@ int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk,
|
|||
rd_kafka_enq_once_t *eonce);
|
||||
void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk);
|
||||
|
||||
rd_kafka_broker_t *rd_kafka_broker_random0(const char *func,
|
||||
int line,
|
||||
rd_kafka_t *rk,
|
||||
rd_bool_t is_up,
|
||||
int state,
|
||||
int *filtered_cnt,
|
||||
int (*filter)(rd_kafka_broker_t *rk,
|
||||
void *opaque),
|
||||
void *opaque);
|
||||
|
||||
#define rd_kafka_broker_random(rk, state, filter, opaque) \
|
||||
rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_false, state, \
|
||||
NULL, filter, opaque)
|
||||
|
||||
#define rd_kafka_broker_random_up(rk, filter, opaque) \
|
||||
rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_true, \
|
||||
RD_KAFKA_BROKER_STATE_UP, NULL, filter, \
|
||||
opaque)
|
||||
|
||||
|
||||
|
||||
/**
|
||||
|
|
@ -602,6 +701,15 @@ void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon,
|
|||
|
||||
void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon);
|
||||
|
||||
void rd_kafka_broker_start_reauth_timer(rd_kafka_broker_t *rkb,
|
||||
int64_t connections_max_reauth_ms);
|
||||
|
||||
void rd_kafka_broker_start_reauth_cb(rd_kafka_timers_t *rkts, void *rkb);
|
||||
|
||||
void rd_kafka_broker_decommission(rd_kafka_t *rk,
|
||||
rd_kafka_broker_t *rkb,
|
||||
rd_list_t *wait_thrds);
|
||||
|
||||
int unittest_broker(void);
|
||||
|
||||
#endif /* _RDKAFKA_BROKER_H_ */
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,8 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2015, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023, Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -37,11 +38,10 @@ void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf) {
        case RD_KAFKAP_Metadata:
                if (rkbuf->rkbuf_u.Metadata.topics)
                        rd_list_destroy(rkbuf->rkbuf_u.Metadata.topics);
                if (rkbuf->rkbuf_u.Metadata.topic_ids)
                        rd_list_destroy(rkbuf->rkbuf_u.Metadata.topic_ids);
                if (rkbuf->rkbuf_u.Metadata.reason)
                        rd_free(rkbuf->rkbuf_u.Metadata.reason);
                if (rkbuf->rkbuf_u.Metadata.rko)
                        rd_kafka_op_reply(rkbuf->rkbuf_u.Metadata.rko,
                                          RD_KAFKA_RESP_ERR__DESTROY);
                if (rkbuf->rkbuf_u.Metadata.decr) {
                        /* Decrease metadata cache's full_.._sent state. */
                        mtx_lock(rkbuf->rkbuf_u.Metadata.decr_lock);

@@ -120,6 +120,18 @@ rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags) {
        return rkbuf;
}

/**
 * @brief Upgrade request header to flexver by writing header tags.
 */
void rd_kafka_buf_upgrade_flexver_request(rd_kafka_buf_t *rkbuf) {
        if (likely(!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))) {
                rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;

                /* Empty request header tags */
                rd_kafka_buf_write_i8(rkbuf, 0);
        }
}


/**
 * @brief Create new request buffer with the request-header written (will

@@ -165,12 +177,7 @@ rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
        rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_client_id);

        if (is_flexver) {
                /* Must set flexver after writing the client id since
                 * it is still a standard non-compact string. */
                rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;

                /* Empty request header tags */
                rd_kafka_buf_write_i8(rkbuf, 0);
                rd_kafka_buf_upgrade_flexver_request(rkbuf);
        }

        return rkbuf;

@@ -234,6 +241,12 @@ void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq) {
        rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0);
}

static void rd_kafka_bufq_reset(rd_kafka_bufq_t *rkbufq) {
        TAILQ_INIT(&rkbufq->rkbq_bufs);
        rd_atomic32_set(&rkbufq->rkbq_cnt, 0);
        rd_atomic32_set(&rkbufq->rkbq_msg_cnt, 0);
}

/**
 * Concat all buffers from 'src' to tail of 'dst'
 */

@@ -242,7 +255,7 @@ void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) {
        (void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt));
        (void)rd_atomic32_add(&dst->rkbq_msg_cnt,
                              rd_atomic32_get(&src->rkbq_msg_cnt));
        rd_kafka_bufq_init(src);
        rd_kafka_bufq_reset(src);
}

/**

@@ -383,7 +396,7 @@ int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
        rd_assert(rd_buf_len(&rkbuf->rkbuf_buf) > 0);

        if (unlikely(!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL ||
                     rd_kafka_terminating(rkb->rkb_rk) ||
                     rd_kafka_broker_or_instance_terminating(rkb) ||
                     rkbuf->rkbuf_retries + incr_retry >
                         rkbuf->rkbuf_max_retries))
                return 0;

@@ -1,7 +1,8 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2015, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -48,21 +49,36 @@ typedef struct rd_tmpabuf_s {
        size_t of;
        char *buf;
        int failed;
        int assert_on_fail;
        rd_bool_t assert_on_fail;
} rd_tmpabuf_t;

/**
 * @brief Allocate new tmpabuf with \p size bytes pre-allocated.
 * @brief Initialize new tmpabuf of non-final \p size bytes.
 */
static RD_UNUSED void
rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, int assert_on_fail) {
        tab->buf = rd_malloc(size);
        tab->size = size;
rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, rd_bool_t assert_on_fail) {
        tab->buf = NULL;
        tab->size = RD_ROUNDUP(size, 8);
        tab->of = 0;
        tab->failed = 0;
        tab->assert_on_fail = assert_on_fail;
}

/**
 * @brief Add a new allocation of \p _size bytes,
 *        rounded up to maximum word size,
 *        for \p _times times.
 */
#define rd_tmpabuf_add_alloc_times(_tab, _size, _times) \
        (_tab)->size += RD_ROUNDUP(_size, 8) * _times

#define rd_tmpabuf_add_alloc(_tab, _size) \
        rd_tmpabuf_add_alloc_times(_tab, _size, 1)
/**
 * @brief Finalize tmpabuf pre-allocating tab->size bytes.
 */
#define rd_tmpabuf_finalize(_tab) (_tab)->buf = rd_malloc((_tab)->size)

/**
 * @brief Free memory allocated by tmpabuf
 */

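The tmpabuf rework above replaces the old allocate-up-front constructor with a two-phase pattern: callers first accumulate sizes, then perform a single allocation. A hedged sketch of the intended calling sequence (internal API; `struct my_hdr` is a hypothetical type, and `rd_tmpabuf_write()` is the writer referenced elsewhere in this header):

/* Sketch only: internal librdkafka helpers from the header above. */
rd_tmpabuf_t tab;

rd_tmpabuf_new(&tab, 0, rd_true /* assert_on_fail */);

/* Phase 1: declare every allocation (sizes are rounded up to 8 bytes). */
rd_tmpabuf_add_alloc(&tab, sizeof(struct my_hdr));     /* hypothetical type */
rd_tmpabuf_add_alloc_times(&tab, sizeof(int32_t), 16); /* 16 ints */

/* Phase 2: one rd_malloc() of the accumulated size. */
rd_tmpabuf_finalize(&tab);

/* Then carve data out of the pre-sized buffer,
 * e.g. rd_tmpabuf_write(&tab, ptr, len). */
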
@@ -360,12 +376,18 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
        union {
                struct {
                        rd_list_t *topics; /* Requested topics (char *) */
                        rd_list_t *topic_ids; /* Requested topic ids
                                               * rd_kafka_Uuid_t */
                        char *reason;       /* Textual reason */
                        rd_kafka_op_t *rko; /* Originating rko with replyq
                                             * (if any) */
                        rd_bool_t all_topics;  /**< Full/All topics requested */
                        rd_bool_t cgrp_update; /**< Update cgrp with topic
                                                *   status from response. */
                        int32_t cgrp_subscription_version;
                        /**< Consumer group subscription version, to
                         *   check before updating cgrp state. */
                        rd_bool_t force_racks; /**< Force the returned metadata
                                                *   to contain partition to
                                                *   rack mapping. */

                        int *decr; /* Decrement this integer by one
                                    * when request is complete:

@@ -503,7 +525,7 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
#define rd_kafka_buf_skip_to(rkbuf, pos) \
        do { \
                size_t __len1 = \
                    (size_t)(pos)-rd_slice_offset(&(rkbuf)->rkbuf_reader); \
                    (size_t)(pos) - rd_slice_offset(&(rkbuf)->rkbuf_reader); \
                if (__len1 && \
                    !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \
                        rd_kafka_buf_check_len(rkbuf, __len1); \

@@ -682,6 +704,10 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
                size_t _slen; \
                char *_dst; \
                rd_kafka_buf_read_str(rkbuf, &_kstr); \
                if (RD_KAFKAP_STR_IS_NULL(&_kstr)) { \
                        dst = NULL; \
                        break; \
                } \
                _slen = RD_KAFKAP_STR_LEN(&_kstr); \
                if (!(_dst = rd_tmpabuf_write(tmpabuf, _kstr.str, _slen + 1))) \
                        rd_kafka_buf_parse_fail( \

@@ -694,21 +720,44 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
        } while (0)

/**
 * Skip a string.
 * Skip a string without flexver.
 */
#define rd_kafka_buf_skip_str(rkbuf) \
#define rd_kafka_buf_skip_str_no_flexver(rkbuf) \
        do { \
                int16_t _slen; \
                rd_kafka_buf_read_i16(rkbuf, &_slen); \
                rd_kafka_buf_skip(rkbuf, RD_KAFKAP_STR_LEN0(_slen)); \
        } while (0)

/* Read Kafka Bytes representation (4+N).
 * The 'kbytes' will be updated to point to rkbuf data */
#define rd_kafka_buf_read_bytes(rkbuf, kbytes) \
/**
 * Skip a string (generic).
 */
#define rd_kafka_buf_skip_str(rkbuf) \
        do { \
                int _klen; \
                if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \
                        uint64_t _uva; \
                        rd_kafka_buf_read_uvarint(rkbuf, &_uva); \
                        rd_kafka_buf_skip( \
                            rkbuf, RD_KAFKAP_STR_LEN0(((int64_t)_uva) - 1)); \
                } else { \
                        rd_kafka_buf_skip_str_no_flexver(rkbuf); \
                } \
        } while (0)
/**
 * Read Kafka COMPACT_BYTES representation (VARINT+N) or
 * standard BYTES representation (4+N).
 * The 'kbytes' will be updated to point to rkbuf data.
 */
#define rd_kafka_buf_read_kbytes(rkbuf, kbytes) \
        do { \
                int32_t _klen; \
                if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) { \
                        rd_kafka_buf_read_i32a(rkbuf, _klen); \
                } else { \
                        uint64_t _uva; \
                        rd_kafka_buf_read_uvarint(rkbuf, &_uva); \
                        _klen = ((int32_t)_uva) - 1; \
                } \
                (kbytes)->len = _klen; \
                if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \
                        (kbytes)->data = NULL; \

@@ -720,7 +769,6 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
                rd_kafka_buf_check_len(rkbuf, _klen); \
        } while (0)


/**
 * @brief Read \p size bytes from buffer, setting \p *ptr to the start
 *        of the memory region.

@@ -737,7 +785,7 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
/**
 * @brief Read varint-length Kafka Bytes representation
 */
#define rd_kafka_buf_read_bytes_varint(rkbuf, kbytes) \
#define rd_kafka_buf_read_kbytes_varint(rkbuf, kbytes) \
        do { \
                int64_t _len2; \
                size_t _r = \

@@ -784,18 +832,62 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
                        uint64_t _tagtype, _taglen; \
                        rd_kafka_buf_read_uvarint(rkbuf, &_tagtype); \
                        rd_kafka_buf_read_uvarint(rkbuf, &_taglen); \
                        if (_taglen > 1) \
                                rd_kafka_buf_skip(rkbuf, \
                                                  (size_t)(_taglen - 1)); \
                        if (_taglen > 0) \
                                rd_kafka_buf_skip(rkbuf, (size_t)(_taglen)); \
                } \
        } while (0)

/**
 * @brief Write tags at the current position in the buffer.
 * @remark Currently always writes empty tags.
 * @remark Change to ..write_uvarint() when actual tags are supported.
 * @brief Read KIP-482 Tags at current position in the buffer using
 *        the `read_tag` function receiving the `opaque' pointer.
 */
#define rd_kafka_buf_write_tags(rkbuf) \
#define rd_kafka_buf_read_tags(rkbuf, read_tag, ...) \
        do { \
                uint64_t _tagcnt; \
                if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \
                        break; \
                rd_kafka_buf_read_uvarint(rkbuf, &_tagcnt); \
                while (_tagcnt-- > 0) { \
                        uint64_t _tagtype, _taglen; \
                        rd_kafka_buf_read_uvarint(rkbuf, &_tagtype); \
                        rd_kafka_buf_read_uvarint(rkbuf, &_taglen); \
                        int _read_tag_resp = \
                            read_tag(rkbuf, _tagtype, _taglen, __VA_ARGS__); \
                        if (_read_tag_resp == -1) \
                                goto err_parse; \
                        if (!_read_tag_resp && _taglen > 0) \
                                rd_kafka_buf_skip(rkbuf, (size_t)(_taglen)); \
                } \
        } while (0)

/**
 * @brief Write \p tagcnt tags at the current position in the buffer.
 *        Calling \p write_tag to write each one with \p rkbuf , tagtype
 *        argument and the remaining arguments.
 */
#define rd_kafka_buf_write_tags(rkbuf, write_tag, tags, tagcnt, ...) \
        do { \
                uint64_t i; \
                if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \
                        break; \
                rd_kafka_buf_write_uvarint(rkbuf, tagcnt); \
                for (i = 0; i < tagcnt; i++) { \
                        size_t of_taglen, prev_buf_len; \
                        rd_kafka_buf_write_uvarint(rkbuf, tags[i]); \
                        of_taglen = rd_kafka_buf_write_arraycnt_pos(rkbuf); \
                        prev_buf_len = (rkbuf)->rkbuf_buf.rbuf_len; \
                        write_tag(rkbuf, tags[i], __VA_ARGS__); \
                        rd_kafka_buf_finalize_arraycnt( \
                            rkbuf, of_taglen, \
                            (rkbuf)->rkbuf_buf.rbuf_len - prev_buf_len - 1); \
                } \
        } while (0)


/**
 * @brief Write empty tags at the current position in the buffer.
 */
#define rd_kafka_buf_write_tags_empty(rkbuf) \
        do { \
                if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \
                        break; \

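The new `rd_kafka_buf_read_tags()` above hands each KIP-482 tag to a caller-supplied `read_tag` function together with the trailing varargs. Judging from the macro body, the callback's return value steers the loop: -1 jumps to `err_parse`, 0 means the tag was not consumed (the macro then skips `_taglen` bytes), and any other value means the callback read the payload itself. A hedged sketch of a conforming callback (the tag id and payload layout are hypothetical; the `err_parse` label is needed because the `rd_kafka_buf_read_*` macros jump to it on underflow):

/* Sketch of a read_tag callback matching the macro's expectations. */
static int my_read_tag(rd_kafka_buf_t *rkbuf,
                       uint64_t tagtype,
                       uint64_t taglen,
                       void *opaque) {
        int16_t value;

        if (tagtype != 0 /* hypothetical tag id */)
                return 0; /* unknown: let the macro skip taglen bytes */

        rd_kafka_buf_read_i16(rkbuf, &value); /* goes to err_parse on failure */
        /* ... store value through opaque ... */
        return 1; /* consumed */

err_parse:
        return -1;
}
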
@@ -815,7 +907,8 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
                } else { \
                        rd_kafka_buf_read_i32(rkbuf, arrcnt); \
                } \
                if (*(arrcnt) < 0 || ((maxval) != -1 && *(arrcnt) > (maxval))) \
                if (*(arrcnt) < -1 || \
                    ((maxval) != -1 && *(arrcnt) > (maxval))) \
                        rd_kafka_buf_parse_fail( \
                            rkbuf, "ApiArrayCnt %" PRId32 " out of range", \
                            *(arrcnt)); \

@@ -917,6 +1010,7 @@ rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
#define rd_kafka_buf_new_flexver_request(rkb, ApiKey, segcnt, size, \
                                         is_flexver) \
        rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, is_flexver)
void rd_kafka_buf_upgrade_flexver_request(rd_kafka_buf_t *rkbuf);

rd_kafka_buf_t *
rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *));

@@ -1072,9 +1166,57 @@ rd_kafka_buf_update_u32(rd_kafka_buf_t *rkbuf, size_t of, uint32_t v) {
}


/**
 * @brief Write varint-encoded signed value to buffer.
 */
static RD_INLINE size_t rd_kafka_buf_write_varint(rd_kafka_buf_t *rkbuf,
                                                  int64_t v) {
        char varint[RD_UVARINT_ENC_SIZEOF(v)];
        size_t sz;

        sz = rd_uvarint_enc_i64(varint, sizeof(varint), v);

        return rd_kafka_buf_write(rkbuf, varint, sz);
}

/**
 * @brief Write varint-encoded unsigned value to buffer.
 */
static RD_INLINE size_t rd_kafka_buf_write_uvarint(rd_kafka_buf_t *rkbuf,
                                                   uint64_t v) {
        char varint[RD_UVARINT_ENC_SIZEOF(v)];
        size_t sz;

        sz = rd_uvarint_enc_u64(varint, sizeof(varint), v);

        return rd_kafka_buf_write(rkbuf, varint, sz);
}



/**
 * @brief Write standard or flexver array count field to buffer.
 *        Use this when the array count is known beforehand, else use
 *        rd_kafka_buf_write_arraycnt_pos().
 */
static RD_INLINE RD_UNUSED size_t
rd_kafka_buf_write_arraycnt(rd_kafka_buf_t *rkbuf, size_t cnt) {

        /* Count must fit in 31-bits minus the per-byte carry-bit */
        rd_assert(cnt + 1 < (size_t)(INT_MAX >> 4));

        if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))
                return rd_kafka_buf_write_i32(rkbuf, (int32_t)cnt);

        /* CompactArray has a base of 1, 0 is for Null arrays */
        cnt += 1;
        return rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)cnt);
}


/**
 * @brief Write array count field to buffer (i32) for later update with
 *        rd_kafka_buf_update_arraycnt().
 *        rd_kafka_buf_finalize_arraycnt().
 */
#define rd_kafka_buf_write_arraycnt_pos(rkbuf) rd_kafka_buf_write_i32(rkbuf, 0)

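Both writers above delegate to `rd_uvarint_enc_*()`, which emits the protocol's unsigned varints: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. `rd_kafka_buf_write_arraycnt()` then layers the KIP-482 CompactArray convention on top by encoding count+1 so that 0 can mean a null array. A self-contained illustration of the byte layout (standard C only; this re-implements the encoding for demonstration and is not librdkafka code):

#include <stdint.h>
#include <stdio.h>

/* Little-endian base-128 varint, as used by the Kafka protocol. */
static size_t uvarint_enc(uint8_t *dst, uint64_t v) {
        size_t of = 0;
        do {
                dst[of++] = (uint8_t)((v & 0x7f) | (v > 0x7f ? 0x80 : 0));
                v >>= 7;
        } while (v);
        return of;
}

int main(void) {
        uint8_t buf[10];
        /* A CompactArray of 299 elements is written as 299 + 1 = 300,
         * which encodes as the two bytes AC 02. */
        size_t sz = uvarint_enc(buf, 299 + 1);
        for (size_t i = 0; i < sz; i++)
                printf("%02X ", buf[i]);
        printf("\n"); /* prints: AC 02 */
        return 0;
}
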
@@ -1092,11 +1234,11 @@ rd_kafka_buf_update_u32(rd_kafka_buf_t *rkbuf, size_t of, uint32_t v) {
 *        and may thus be costly.
 */
static RD_INLINE void
rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, int cnt) {
rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, size_t cnt) {
        char buf[sizeof(int32_t)];
        size_t sz, r;

        rd_assert(cnt >= 0);
        rd_assert(cnt < (size_t)INT_MAX);

        if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
                rd_kafka_buf_update_i32(rkbuf, of, (int32_t)cnt);

@@ -1108,7 +1250,8 @@ rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, int cnt) {

        sz = rd_uvarint_enc_u64(buf, sizeof(buf), (uint64_t)cnt);
        rd_assert(!RD_UVARINT_OVERFLOW(sz));

        if (cnt < 127)
                rd_assert(sz == 1);
        rd_buf_write_update(&rkbuf->rkbuf_buf, of, buf, sz);

        if (sz < sizeof(int32_t)) {

@@ -1141,34 +1284,6 @@ rd_kafka_buf_update_i64(rd_kafka_buf_t *rkbuf, size_t of, int64_t v) {
        rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
}


/**
 * @brief Write varint-encoded signed value to buffer.
 */
static RD_INLINE size_t rd_kafka_buf_write_varint(rd_kafka_buf_t *rkbuf,
                                                  int64_t v) {
        char varint[RD_UVARINT_ENC_SIZEOF(v)];
        size_t sz;

        sz = rd_uvarint_enc_i64(varint, sizeof(varint), v);

        return rd_kafka_buf_write(rkbuf, varint, sz);
}

/**
 * @brief Write varint-encoded unsigned value to buffer.
 */
static RD_INLINE size_t rd_kafka_buf_write_uvarint(rd_kafka_buf_t *rkbuf,
                                                   uint64_t v) {
        char varint[RD_UVARINT_ENC_SIZEOF(v)];
        size_t sz;

        sz = rd_uvarint_enc_u64(varint, sizeof(varint), v);

        return rd_kafka_buf_write(rkbuf, varint, sz);
}


/**
 * @brief Write standard (2-byte header) or KIP-482 COMPACT_STRING to buffer.
 *

@@ -1274,13 +1389,15 @@ static RD_INLINE void rd_kafka_buf_push_kstr(rd_kafka_buf_t *rkbuf,
static RD_INLINE size_t
rd_kafka_buf_write_kbytes(rd_kafka_buf_t *rkbuf,
                          const rd_kafkap_bytes_t *kbytes) {
        size_t len;
        size_t len, r;

        if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
                if (!kbytes || RD_KAFKAP_BYTES_IS_NULL(kbytes))
                        return rd_kafka_buf_write_i32(rkbuf, -1);

                if (RD_KAFKAP_BYTES_IS_SERIALIZED(kbytes))
                        return rd_kafka_buf_write(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
                        return rd_kafka_buf_write(rkbuf,
                                                  RD_KAFKAP_BYTES_SER(kbytes),
                                                  RD_KAFKAP_BYTES_SIZE(kbytes));

                len = RD_KAFKAP_BYTES_LEN(kbytes);

@@ -1288,16 +1405,24 @@ rd_kafka_buf_write_kbytes(rd_kafka_buf_t *rkbuf,
                rd_kafka_buf_write(rkbuf, kbytes->data, len);

                return 4 + len;
        }
        }

/**
 * Push (i.e., no copy) Kafka bytes to buffer iovec
/* COMPACT_BYTES lengths are:
 * 0 = NULL,
 * 1 = empty
 * N.. = length + 1
 */
static RD_INLINE void
rd_kafka_buf_push_kbytes(rd_kafka_buf_t *rkbuf,
                         const rd_kafkap_bytes_t *kbytes) {
        rd_kafka_buf_push(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
                          RD_KAFKAP_BYTES_SIZE(kbytes), NULL);
        if (!kbytes)
                len = 0;
        else
                len = kbytes->len + 1;

        r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len);
        if (len > 1) {
                rd_kafka_buf_write(rkbuf, kbytes->data, len - 1);
                r += len - 1;
        }
        return r;
}

/**

@@ -1381,4 +1506,20 @@ void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf,
                            void *make_opaque,
                            void (*free_make_opaque_cb)(void *make_opaque));


#define rd_kafka_buf_read_uuid(rkbuf, uuid) \
        do { \
                rd_kafka_buf_read_i64(rkbuf, \
                                      &((uuid)->most_significant_bits)); \
                rd_kafka_buf_read_i64(rkbuf, \
                                      &((uuid)->least_significant_bits)); \
                (uuid)->base64str[0] = '\0'; \
        } while (0)

static RD_UNUSED void rd_kafka_buf_write_uuid(rd_kafka_buf_t *rkbuf,
                                              rd_kafka_Uuid_t *uuid) {
        rd_kafka_buf_write_i64(rkbuf, uuid->most_significant_bits);
        rd_kafka_buf_write_i64(rkbuf, uuid->least_significant_bits);
}

#endif /* _RDKAFKA_BUF_H_ */

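The UUID helpers that close this header serialize a Kafka UUID as two big-endian int64s, most-significant half first (16 bytes on the wire, matching the Java client's msb/lsb split); the read macro also invalidates the cached base64 string. A standalone sketch of that layout (the struct merely mirrors the two 64-bit halves of `rd_kafka_Uuid_t`; names here are illustrative):

#include <stdint.h>

/* Hypothetical mirror of rd_kafka_Uuid_t's two 64-bit halves. */
typedef struct {
        int64_t most_significant_bits;
        int64_t least_significant_bits;
} uuid16;

/* Wire format: each half as a big-endian 64-bit integer, msb half first. */
static void uuid_to_wire(const uuid16 *u, uint8_t wire[16]) {
        for (int i = 0; i < 8; i++) {
                wire[i] = (uint8_t)((uint64_t)u->most_significant_bits >>
                                    (56 - 8 * i));
                wire[8 + i] = (uint8_t)((uint64_t)u->least_significant_bits >>
                                        (56 - 8 * i));
        }
}
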
@@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2019 Magnus Edenhill
 * Copyright (c) 2019-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -78,6 +78,8 @@ static void rd_kafka_cert_destroy(rd_kafka_cert_t *cert) {

        if (cert->x509)
                X509_free(cert->x509);
        if (cert->chain)
                sk_X509_pop_free(cert->chain, X509_free);
        if (cert->pkey)
                EVP_PKEY_free(cert->pkey);
        if (cert->store)

@@ -314,10 +316,11 @@ static rd_kafka_cert_t *rd_kafka_cert_new(const rd_kafka_conf_t *conf,
        switch (encoding) {
        case RD_KAFKA_CERT_ENC_PKCS12: {
                EVP_PKEY *ign_pkey;
                STACK_OF(X509) *ca = NULL;

                action = "parse PKCS#12";
                if (!PKCS12_parse(p12, conf->ssl.key_password,
                                  &ign_pkey, &cert->x509, NULL))
                                  &ign_pkey, &cert->x509, &ca))
                        goto fail;

                EVP_PKEY_free(ign_pkey);

@@ -325,6 +328,13 @@ static rd_kafka_cert_t *rd_kafka_cert_new(const rd_kafka_conf_t *conf,
                action = "retrieve public key";
                if (!cert->x509)
                        goto fail;

                if (ca) {
                        if (sk_X509_num(ca) > 0)
                                cert->chain = ca;
                        else
                                sk_X509_pop_free(ca, X509_free);
                }
        } break;

        case RD_KAFKA_CERT_ENC_DER:

@@ -341,6 +351,20 @@ static rd_kafka_cert_t *rd_kafka_cert_new(const rd_kafka_conf_t *conf,
                                                  (void *)conf);
                if (!cert->x509)
                        goto fail;

                cert->chain = sk_X509_new_null();
                if (rd_kafka_ssl_read_cert_chain_from_BIO(
                        bio, cert->chain, rd_kafka_conf_ssl_passwd_cb,
                        (void *)conf) != 0) {
                        sk_X509_pop_free(cert->chain, X509_free);
                        cert->chain = NULL;
                        goto fail;
                }

                if (sk_X509_num(cert->chain) == 0) {
                        sk_X509_pop_free(cert->chain, X509_free);
                        cert->chain = NULL;
                }
                break;

        default:

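The PKCS#12 branch above now passes a `STACK_OF(X509)` out-parameter to `PKCS12_parse()` so intermediate CA certificates bundled in the keystore are kept as `cert->chain` instead of being discarded. A minimal standalone OpenSSL sketch of the same pattern (error handling abbreviated; `p12` and `password` are assumed to come from the caller):

#include <openssl/pkcs12.h>

/* Extract leaf cert, key and any bundled intermediates from a PKCS#12
 * structure, keeping the chain only when it is non-empty. */
static int extract_with_chain(PKCS12 *p12, const char *password,
                              EVP_PKEY **pkey, X509 **leaf,
                              STACK_OF(X509) **chain_out) {
        STACK_OF(X509) *ca = NULL;

        if (!PKCS12_parse(p12, password, pkey, leaf, &ca))
                return -1;

        if (ca && sk_X509_num(ca) > 0)
                *chain_out = ca; /* free later: sk_X509_pop_free(ca, X509_free) */
        else if (ca)
                sk_X509_pop_free(ca, X509_free);

        return 0;
}
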
@@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2019 Magnus Edenhill
 * Copyright (c) 2019-2022, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -44,8 +44,9 @@ typedef struct rd_kafka_cert_s {
        rd_refcnt_t refcnt;
#if WITH_SSL
        X509 *x509;            /**< Certificate (public key) */
        STACK_OF(X509) *chain; /**< Certificate chain (public key) */
        EVP_PKEY *pkey;        /**< Private key */
        X509_STORE *store;     /**< CA certificate chain store */
        X509_STORE *store;     /**< CA trusted certificates */
#endif
} rd_kafka_cert_t;

File diff suppressed because it is too large

@@ -1,7 +1,8 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2015, Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023, Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -56,6 +57,7 @@ typedef struct rd_kafka_cgrp_s {
        rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */
        rd_kafkap_str_t *rkcg_group_instance_id;
        const rd_kafkap_str_t *rkcg_client_id;
        rd_kafkap_str_t *rkcg_client_rack;

        enum {
                /* Init state */

@@ -163,6 +165,10 @@ typedef struct rd_kafka_cgrp_s {

        rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl*/
        rd_interval_t rkcg_heartbeat_intvl;   /* Heartbeat intvl */
        rd_kafka_timer_t rkcg_serve_timer;    /* Timer for next serve. */
        int rkcg_heartbeat_intvl_ms;          /* KIP 848: received
                                               * heartbeat interval in
                                               * milliseconds */
        rd_interval_t rkcg_join_intvl;         /* JoinGroup interval */
        rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */

@@ -179,7 +185,8 @@ typedef struct rd_kafka_cgrp_s {

        rd_list_t rkcg_toppars; /* Toppars subscribed to*/

        int32_t rkcg_generation_id; /* Current generation id */
        int32_t rkcg_generation_id; /* Current generation id (classic)
                                     * or member epoch (consumer). */

        rd_kafka_assignor_t *rkcg_assignor; /**< The current partition
                                             *   assignor. used by both

@@ -190,6 +197,12 @@ typedef struct rd_kafka_cgrp_s {
        int32_t rkcg_coord_id; /**< Current coordinator id,
                                *   or -1 if not known. */

        rd_kafka_group_protocol_t rkcg_group_protocol; /**< Group protocol to use */

        rd_kafkap_str_t *rkcg_group_remote_assignor; /**< Group remote
                                                      *   assignor to use */

        rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator
                                             *   broker handle, or NULL.
                                             *   rkcg_coord's nodename is

@@ -217,9 +230,33 @@ typedef struct rd_kafka_cgrp_s {
        rd_kafka_topic_partition_list_t *rkcg_errored_topics;
        /** If a SUBSCRIBE op is received during a COOPERATIVE rebalance,
         *  actioning this will be postponed until after the rebalance
         *  completes. The waiting subscription is stored here.
         *  Mutually exclusive with rkcg_next_subscription. */
         *  completes. The waiting subscription is stored here. */
        rd_kafka_topic_partition_list_t *rkcg_next_subscription;

        /**
         * Subscription regex pattern. All the provided regex patterns are
         * stored as a single string with each pattern separated by '|'.
         *
         * Only applicable for the consumer protocol introduced in KIP-848.
         *
         * rkcg_subscription = rkcg_subscription_topics +
         *                     rkcg_subscription_regex
         */
        rd_kafkap_str_t *rkcg_subscription_regex;

        /**
         * Full topic names extracted out from the rkcg_subscription.
         *
         * Only applicable for the consumer protocol introduced in KIP-848.
         *
         * For the consumer protocol, this field doesn't include regex
         * subscriptions. For that please refer `rkcg_subscription_regex`
         *
         * rkcg_subscription = rkcg_subscription_topics +
         *                     rkcg_subscription_regex
         */
        rd_kafka_topic_partition_list_t *rkcg_subscription_topics;

        /** If a (un)SUBSCRIBE op is received during a COOPERATIVE rebalance,
         *  actioning this will be postponed until after the rebalance
         *  completes. This flag is used to signal a waiting unsubscribe

@@ -255,10 +292,52 @@ typedef struct rd_kafka_cgrp_s {
         *  currently in-progress incremental unassign. */
        rd_kafka_topic_partition_list_t *rkcg_rebalance_incr_assignment;

        /** Current acked assignment, start with an empty list. */
        rd_kafka_topic_partition_list_t *rkcg_current_assignment;

        /** Assignment that is currently reconciling.
         *  Can be NULL in case there's no reconciliation ongoing. */
        rd_kafka_topic_partition_list_t *rkcg_target_assignment;

        /** Next assignment that will be reconciled once current
         *  reconciliation finishes. Can be NULL. */
        rd_kafka_topic_partition_list_t *rkcg_next_target_assignment;

        /** Number of backoff retries when expediting next heartbeat. */
        int rkcg_expedite_heartbeat_retries;

        /** Flags for KIP-848 state machine. */
        int rkcg_consumer_flags;
/** Coordinator is waiting for an acknowledgement of currently reconciled
 *  target assignment. Cleared when an HB succeeds
 *  after reconciliation finishes. */
#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK 0x1
/** Member is sending an acknowledgement for a reconciled assignment */
#define RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK 0x2
/** A new subscription needs to be sent to the Coordinator. */
#define RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION 0x4
/** A new subscription is being sent to the Coordinator. */
#define RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION 0x8
/** Consumer has subscribed at least once,
 *  if it didn't happen rebalance protocol is still
 *  considered NONE, otherwise it depends on the
 *  configured partition assignors. */
#define RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE 0x10
/** Send a complete request in next heartbeat */
#define RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST 0x20
/** Member is fenced, need to rejoin */
#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN 0x40
/** Member is fenced, rejoining */
#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE 0x80
/** Serve pending assignments after heartbeat */
#define RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING 0x100

        /** Rejoin the group following a currently in-progress
         *  incremental unassign. */
        rd_bool_t rkcg_rebalance_rejoin;

        rd_ts_t rkcg_ts_last_err;          /* Timestamp of last error
                                            * propagated to application */
        rd_kafka_resp_err_t rkcg_last_err; /* Last error propagated to
                                            * application.
                                            * This is for silencing

@@ -280,6 +359,8 @@ typedef struct rd_kafka_cgrp_s {

        rd_atomic32_t rkcg_terminated; /**< Consumer has been closed */

        rd_atomic32_t rkcg_subscription_version; /**< Subscription version */

        /* Protected by rd_kafka_*lock() */
        struct {
                rd_ts_t ts_rebalance; /* Timestamp of

@@ -293,6 +374,9 @@ typedef struct rd_kafka_cgrp_s {
                                       *   assignment */
        } rkcg_c;

        /* Timestamp of last rebalance start */
        rd_ts_t rkcg_ts_rebalance_start;

} rd_kafka_cgrp_t;


@@ -313,6 +397,7 @@ extern const char *rd_kafka_cgrp_join_state_names[];

void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg);
rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk,
                                   rd_kafka_group_protocol_t group_protocol,
                                   const rd_kafkap_str_t *group_id,
                                   const rd_kafkap_str_t *client_id);
void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg);

@@ -346,6 +431,12 @@ void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg,
                                         rd_bool_t do_join);
#define rd_kafka_cgrp_get(rk) ((rk)->rk_cgrp)

#define rd_kafka_cgrp_same_subscription_version(rk_cgrp, \
                                                cgrp_subscription_version) \
        ((rk_cgrp) && \
         (cgrp_subscription_version == -1 || \
          rd_atomic32_get(&(rk_cgrp)->rkcg_subscription_version) == \
              cgrp_subscription_version))

void rd_kafka_cgrp_assigned_offsets_commit(
    rd_kafka_cgrp_t *rkcg,

@@ -380,4 +471,7 @@ rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) {
        }
}

void rd_kafka_cgrp_consumer_expedite_next_heartbeat(rd_kafka_cgrp_t *rkcg,
                                                    const char *reason);

#endif /* _RDKAFKA_CGRP_H_ */

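The `RD_KAFKA_CGRP_CONSUMER_F_*` defines above drive the KIP-848 state machine through a plain bitmask in `rkcg_consumer_flags`. An illustrative fragment of the usual set/test/clear idiom (`rkcg` is a hypothetical `rd_kafka_cgrp_t` pointer; the actual transitions in librdkafka are more involved):

/* Sketch only: generic bit-flag handling against the defines above. */
rkcg->rkcg_consumer_flags |= RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK;

if (rkcg->rkcg_consumer_flags & RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK) {
        /* e.g. heartbeat succeeded after reconciliation: ack delivered */
        rkcg->rkcg_consumer_flags &= ~RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK;
}
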
@@ -1,7 +1,8 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2022 Magnus Edenhill
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -55,10 +56,15 @@
#include <windows.h>
#endif

#ifdef WITH_OAUTHBEARER_OIDC
#include <curl/curl.h>
#endif

struct rd_kafka_property {
        rd_kafka_conf_scope_t scope;
        const char *name;
        enum { _RK_C_STR,
        enum {
                _RK_C_STR,
                _RK_C_INT,
                _RK_C_DBL, /* Double */
                _RK_C_S2I, /* String to Integer mapping.

@@ -89,7 +95,7 @@ struct rd_kafka_property {
                const char *str;
                const char *unsupported; /**< Reason for value not being
                                          *   supported in this build. */
        } s2i[20]; /* _RK_C_S2I and _RK_C_S2F */
        } s2i[21]; /* _RK_C_S2I and _RK_C_S2F */

        const char *unsupported; /**< Reason for property not being supported
                                  *   in this build.

@@ -197,6 +203,15 @@ struct rd_kafka_property {
        "available at build time"
#endif

#if WITH_OAUTHBEARER_OIDC
#define _UNSUPPORTED_HTTPS .unsupported = NULL
#else
#define _UNSUPPORTED_HTTPS \
        .unsupported = \
            "HTTPS calls depend on libcurl and OpenSSL which were not " \
            "available at build time"
#endif

#ifdef _WIN32
#define _UNSUPPORTED_WIN32_GSSAPI \
        .unsupported = \

@@ -436,6 +451,34 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
     1, 1000000, 1000000},
    {_RK_GLOBAL, "max.in.flight", _RK_C_ALIAS,
     .sdef = "max.in.flight.requests.per.connection"},
    {_RK_GLOBAL, "metadata.recovery.strategy", _RK_C_S2I,
     _RK(metadata_recovery_strategy),
     "Controls how the client recovers when none of the brokers known to it "
     "is available. If set to `none`, the client doesn't re-bootstrap. "
     "If set to `rebootstrap`, the client repeats the bootstrap process "
     "using `bootstrap.servers` and brokers added through "
     "`rd_kafka_brokers_add()`. Rebootstrapping is useful when a client "
     "communicates with brokers so infrequently that the set of brokers "
     "may change entirely before the client refreshes metadata. "
     "Metadata recovery is triggered when all last-known brokers appear "
     "unavailable simultaneously or the client cannot refresh metadata within "
     "`metadata.recovery.rebootstrap.trigger.ms` or it's requested in a "
     "metadata response.",
     .vdef = RD_KAFKA_METADATA_RECOVERY_STRATEGY_REBOOTSTRAP,
     .s2i = {{RD_KAFKA_METADATA_RECOVERY_STRATEGY_NONE, "none"},
             {RD_KAFKA_METADATA_RECOVERY_STRATEGY_REBOOTSTRAP, "rebootstrap"},
             {0, NULL}}},
    {_RK_GLOBAL, "metadata.recovery.rebootstrap.trigger.ms", _RK_C_INT,
     _RK(metadata_recovery_rebootstrap_trigger_ms),
     "If a client configured to rebootstrap using "
     "`metadata.recovery.strategy=rebootstrap` "
     "is unable to obtain metadata from any "
     "of the brokers for this interval, "
     "client repeats the bootstrap process using "
     "`bootstrap.servers` configuration "
     "and brokers added through "
     "`rd_kafka_brokers_add()`.",
     0, INT_MAX, 300000},
    {_RK_GLOBAL | _RK_DEPRECATED | _RK_HIDDEN, "metadata.request.timeout.ms",
     _RK_C_INT, _RK(metadata_request_timeout_ms), "Not used.", 10, 900 * 1000,
     10},

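Both new recovery properties are set through the ordinary public configuration API, so applications opt in (or out) with `rd_kafka_conf_set()`. A small sketch (the values shown are the documented defaults; error handling kept minimal):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        char errstr[512];

        if (rd_kafka_conf_set(conf, "metadata.recovery.strategy",
                              "rebootstrap", errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf,
                              "metadata.recovery.rebootstrap.trigger.ms",
                              "300000", errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                return 1;
        }

        rd_kafka_conf_destroy(conf);
        return 0;
}
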
@@ -457,10 +500,12 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
    {_RK_GLOBAL, "topic.metadata.refresh.fast.interval.ms", _RK_C_INT,
     _RK(metadata_refresh_fast_interval_ms),
     "When a topic loses its leader a new metadata request will be "
     "enqueued with this initial interval, exponentially increasing "
     "enqueued immediately and then with this initial interval, exponentially "
     "increasing up to `retry.backoff.max.ms`, "
     "until the topic metadata has been refreshed. "
     "If not set explicitly, it will be defaulted to `retry.backoff.ms`. "
     "This is used to recover quickly from transitioning leader brokers.",
     1, 60 * 1000, 250},
     1, 60 * 1000, 100},
    {_RK_GLOBAL | _RK_DEPRECATED, "topic.metadata.refresh.fast.cnt", _RK_C_INT,
     _RK(metadata_refresh_fast_cnt), "No longer used.", 0, 1000, 10},
    {_RK_GLOBAL, "topic.metadata.refresh.sparse", _RK_C_BOOL,

@@ -508,6 +553,7 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
            {RD_KAFKA_DBG_MOCK, "mock"},
            {RD_KAFKA_DBG_ASSIGNOR, "assignor"},
            {RD_KAFKA_DBG_CONF, "conf"},
            {RD_KAFKA_DBG_TELEMETRY, "telemetry"},
            {RD_KAFKA_DBG_ALL, "all"}}},
    {_RK_GLOBAL, "socket.timeout.ms", _RK_C_INT, _RK(socket_timeout_ms),
     "Default timeout for network requests. "

@@ -536,7 +582,7 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
#endif
    },
    {_RK_GLOBAL, "socket.nagle.disable", _RK_C_BOOL, _RK(socket_nagle_disable),
     "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.", 0, 1, 0
     "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.", 0, 1, 1
#ifndef TCP_NODELAY
     ,
     .unsupported = "TCP_NODELAY not available at build time"

@@ -698,8 +744,10 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
     "The application should mask this signal as an internal "
     "signal handler is installed.",
     0, 128, 0},
    {_RK_GLOBAL | _RK_HIGH, "api.version.request", _RK_C_BOOL,
    {_RK_GLOBAL | _RK_HIGH | _RK_DEPRECATED, "api.version.request", _RK_C_BOOL,
     _RK(api_version_request),
     "**Post-deprecation actions: remove this configuration property, "
     "brokers < 0.10.0 won't be supported anymore in librdkafka 3.x.** "
     "Request broker's supported API versions to adjust functionality to "
     "available protocol features. If set to false, or the "
     "ApiVersionRequest fails, the fallback version "

@@ -711,16 +759,20 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
    {_RK_GLOBAL, "api.version.request.timeout.ms", _RK_C_INT,
     _RK(api_version_request_timeout_ms),
     "Timeout for broker API version requests.", 1, 5 * 60 * 1000, 10 * 1000},
    {_RK_GLOBAL | _RK_MED, "api.version.fallback.ms", _RK_C_INT,
     _RK(api_version_fallback_ms),
    {_RK_GLOBAL | _RK_MED | _RK_DEPRECATED, "api.version.fallback.ms",
     _RK_C_INT, _RK(api_version_fallback_ms),
     "**Post-deprecation actions: remove this configuration property, "
     "brokers < 0.10.0 won't be supported anymore in librdkafka 3.x.** "
     "Dictates how long the `broker.version.fallback` fallback is used "
     "in the case the ApiVersionRequest fails. "
     "**NOTE**: The ApiVersionRequest is only issued when a new connection "
     "to the broker is made (such as after an upgrade).",
     0, 86400 * 7 * 1000, 0},

    {_RK_GLOBAL | _RK_MED, "broker.version.fallback", _RK_C_STR,
     _RK(broker_version_fallback),
    {_RK_GLOBAL | _RK_MED | _RK_DEPRECATED, "broker.version.fallback",
     _RK_C_STR, _RK(broker_version_fallback),
     "**Post-deprecation actions: remove this configuration property, "
     "brokers < 0.10.0 won't be supported anymore in librdkafka 3.x.** "
     "Older broker versions (before 0.10.0) provide no way for a client to "
     "query "
     "for supported protocol features "

@@ -821,6 +873,29 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
     "If OpenSSL is dynamically linked the OpenSSL library's default "
     "path will be used (see `OPENSSLDIR` in `openssl version -a`).",
     _UNSUPPORTED_SSL},
    {_RK_GLOBAL, "https.ca.location", _RK_C_STR, _RK(https.ca_location),
     "File or directory path to CA certificate(s) for verifying "
     "HTTPS endpoints, like `sasl.oauthbearer.token.endpoint.url` used for "
     "OAUTHBEARER/OIDC authentication. "
     "Mutually exclusive with `https.ca.pem`. "
     "Defaults: "
     "On Windows the system's CA certificates are automatically looked "
     "up in the Windows Root certificate store. "
     "On Mac OSX this configuration defaults to `probe`. "
     "It is recommended to install openssl using Homebrew, "
     "to provide CA certificates. "
     "On Linux install the distribution's ca-certificates package. "
     "If OpenSSL is statically linked or `https.ca.location` is set to "
     "`probe` a list of standard paths will be probed and the first one "
     "found will be used as the default CA certificate location path. "
     "If OpenSSL is dynamically linked the OpenSSL library's default "
     "path will be used (see `OPENSSLDIR` in `openssl version -a`).",
     _UNSUPPORTED_HTTPS},
    {_RK_GLOBAL, "https.ca.pem", _RK_C_STR, _RK(https.ca_pem),
     "CA certificate string (PEM format) for verifying HTTPS endpoints. "
     "Mutually exclusive with `https.ca.location`. "
     "Optional: see `https.ca.location`.",
     _UNSUPPORTED_HTTPS},
    {_RK_GLOBAL | _RK_SENSITIVE, "ssl.ca.pem", _RK_C_STR, _RK(ssl.ca_pem),
     "CA certificate string (PEM format) for verifying the broker's key.",
     _UNSUPPORTED_SSL},

@@ -897,11 +972,13 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
     "Java TrustStores are not supported, use `ssl.ca.location` "
     "and a certificate file instead. "
     "See "
     "https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka "
     "https://github.com/confluentinc/librdkafka/"
     "wiki/Using-SSL-with-librdkafka "
     "for more information."},
    {_RK_GLOBAL, "sasl.jaas.config", _RK_C_INVALID, _RK(dummy),
     "Java JAAS configuration is not supported, see "
     "https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka "
     "https://github.com/confluentinc/librdkafka/"
     "wiki/Using-SASL-with-librdkafka "
     "for more information."},

    {_RK_GLOBAL | _RK_HIGH, "sasl.mechanisms", _RK_C_STR, _RK(sasl.mechanisms),

@@ -1012,7 +1089,11 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
     "authorization server handles. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
     _UNSUPPORTED_OIDC},
    {_RK_GLOBAL, "sasl.oauthbearer.client.secret", _RK_C_STR,
    {_RK_GLOBAL, "sasl.oauthbearer.client.credentials.client.id", _RK_C_ALIAS,
     .sdef = "sasl.oauthbearer.client.id"},
    {_RK_GLOBAL, "sasl.oauthbearer.client.credentials.client.secret",
     _RK_C_ALIAS, .sdef = "sasl.oauthbearer.client.secret"},
    {_RK_GLOBAL | _RK_SENSITIVE, "sasl.oauthbearer.client.secret", _RK_C_STR,
     _RK(sasl.oauthbearer.client_secret),
     "Client secret only known to the application and the "
     "authorization server. This should be a sufficiently random string "

@@ -1037,6 +1118,94 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
     "OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
     _UNSUPPORTED_OIDC},
    {
        _RK_GLOBAL,
        "sasl.oauthbearer.grant.type",
        _RK_C_S2I,
        _RK(sasl.oauthbearer.grant_type),
        "OAuth grant type to use when communicating with the identity "
        "provider.",
        _UNSUPPORTED_OIDC,
        .vdef = RD_KAFKA_SASL_OAUTHBEARER_GRANT_TYPE_CLIENT_CREDENTIALS,
        .s2i = {{RD_KAFKA_SASL_OAUTHBEARER_GRANT_TYPE_CLIENT_CREDENTIALS,
                 "client_credentials"},
                {RD_KAFKA_SASL_OAUTHBEARER_GRANT_TYPE_JWT_BEARER,
                 "urn:ietf:params:oauth:grant-type:jwt-bearer"}},
    },
    {_RK_GLOBAL, "sasl.oauthbearer.assertion.algorithm", _RK_C_S2I,
     _RK(sasl.oauthbearer.assertion.algorithm),
     "Algorithm the client should use to sign the assertion sent "
     "to the identity provider and in the OAuth alg header in the JWT "
     "assertion.",
     _UNSUPPORTED_OIDC,
     .vdef = RD_KAFKA_SASL_OAUTHBEARER_ASSERTION_ALGORITHM_RS256,
     .s2i = {{RD_KAFKA_SASL_OAUTHBEARER_ASSERTION_ALGORITHM_RS256, "RS256"},
             {RD_KAFKA_SASL_OAUTHBEARER_ASSERTION_ALGORITHM_ES256, "ES256"}}},
    {_RK_GLOBAL | _RK_SENSITIVE, "sasl.oauthbearer.assertion.private.key.file",
     _RK_C_STR, _RK(sasl.oauthbearer.assertion.private_key.file),
     "Path to client's private key (PEM) used for authentication "
     "when using the JWT assertion.",
     _UNSUPPORTED_OIDC},
    {_RK_GLOBAL | _RK_SENSITIVE,
     "sasl.oauthbearer.assertion.private.key.passphrase", _RK_C_STR,
     _RK(sasl.oauthbearer.assertion.private_key.passphrase),
     "Private key passphrase for `sasl.oauthbearer.assertion.private.key.file`"
     " or `sasl.oauthbearer.assertion.private.key.pem`.",
     _UNSUPPORTED_OIDC},
    {_RK_GLOBAL | _RK_SENSITIVE, "sasl.oauthbearer.assertion.private.key.pem",
     _RK_C_STR, _RK(sasl.oauthbearer.assertion.private_key.pem),
     "Client's private key (PEM) used for authentication "
     "when using the JWT assertion.",
     _UNSUPPORTED_OIDC},
    {_RK_GLOBAL, "sasl.oauthbearer.assertion.file", _RK_C_STR,
     _RK(sasl.oauthbearer.assertion.file),
     "Path to the assertion file. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\" and JWT "
     "assertion is needed.",
     _UNSUPPORTED_OIDC},
    {_RK_GLOBAL, "sasl.oauthbearer.assertion.claim.aud", _RK_C_STR,
     _RK(sasl.oauthbearer.assertion.claim.audience),
     "JWT audience claim. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\" and JWT "
     "assertion is needed.",
     _UNSUPPORTED_OIDC},
    {_RK_GLOBAL, "sasl.oauthbearer.assertion.claim.exp.seconds", _RK_C_INT,
     _RK(sasl.oauthbearer.assertion.claim.expiration_s),
     "Assertion expiration time in seconds. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\" and JWT "
     "assertion is needed.",
     1, INT_MAX, 300, _UNSUPPORTED_OIDC},
    {_RK_GLOBAL, "sasl.oauthbearer.assertion.claim.iss", _RK_C_STR,
     _RK(sasl.oauthbearer.assertion.claim.issuer),
     "JWT issuer claim. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\" and JWT "
     "assertion is needed.",
     _UNSUPPORTED_OIDC},
    {_RK_GLOBAL, "sasl.oauthbearer.assertion.claim.jti.include", _RK_C_BOOL,
     _RK(sasl.oauthbearer.assertion.claim.jti_include),
     "JWT ID claim. When set to `true`, a random UUID is generated. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\" and JWT "
     "assertion is needed.",
     0, 1, 0, _UNSUPPORTED_OIDC},
    {_RK_GLOBAL, "sasl.oauthbearer.assertion.claim.nbf.seconds", _RK_C_INT,
     _RK(sasl.oauthbearer.assertion.claim.not_before_s),
     "Assertion not before time in seconds. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\" and JWT "
     "assertion is needed.",
     0, INT_MAX, 60, _UNSUPPORTED_OIDC},
    {_RK_GLOBAL, "sasl.oauthbearer.assertion.claim.sub", _RK_C_STR,
     _RK(sasl.oauthbearer.assertion.claim.subject),
     "JWT subject claim. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\" and JWT "
     "assertion is needed.",
     _UNSUPPORTED_OIDC},
    {_RK_GLOBAL, "sasl.oauthbearer.assertion.jwt.template.file", _RK_C_STR,
     _RK(sasl.oauthbearer.assertion.jwt_template_file),
     "Path to the JWT template file. "
     "Only used when `sasl.oauthbearer.method` is set to \"oidc\" and JWT "
     "assertion is needed.",
     _UNSUPPORTED_OIDC},


    /* Plugins */
    {_RK_GLOBAL, "plugin.library.paths", _RK_C_STR, _RK(plugin_paths),

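Taken together, the new `sasl.oauthbearer.*` entries let a client authenticate to an OIDC provider with a signed JWT assertion rather than a client secret. A hedged configuration sketch reusing the `rd_kafka_conf_set()` pattern from the earlier example (property names come from the table above; the endpoint URL and key path are placeholders, and `conf`/`errstr` are assumed to exist as before):

/* Values are illustrative placeholders, not working credentials. */
static const char *oidc_props[][2] = {
    {"security.protocol", "SASL_SSL"},
    {"sasl.mechanisms", "OAUTHBEARER"},
    {"sasl.oauthbearer.method", "oidc"},
    {"sasl.oauthbearer.grant.type",
     "urn:ietf:params:oauth:grant-type:jwt-bearer"},
    {"sasl.oauthbearer.assertion.algorithm", "RS256"},
    {"sasl.oauthbearer.assertion.private.key.file", "/etc/kafka/client.pem"},
    {"sasl.oauthbearer.token.endpoint.url", "https://idp.example/token"},
};

for (size_t i = 0; i < sizeof(oidc_props) / sizeof(oidc_props[0]); i++)
        if (rd_kafka_conf_set(conf, oidc_props[i][0], oidc_props[i][1],
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "%s: %s\n", oidc_props[i][0], errstr);
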
@ -1104,9 +1273,10 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
|
|||
"members of the group to assign partitions to group members. If "
|
||||
"there is more than one eligible strategy, preference is "
|
||||
"determined by the order of this list (strategies earlier in the "
|
||||
"list have higher priority). "
|
||||
"Cooperative and non-cooperative (eager) strategies must not be "
|
||||
"mixed. "
|
||||
"list have higher priority). Cooperative and non-cooperative (eager)"
|
||||
"strategies must not be mixed. `partition.assignment.strategy` is not "
|
||||
"supported for "
|
||||
"`group.protocol=consumer`. Use `group.remote.assignor` instead. "
|
||||
"Available strategies: range, roundrobin, cooperative-sticky.",
|
||||
.sdef = "range,roundrobin"},
|
||||
{_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "session.timeout.ms", _RK_C_INT,
|
||||
|
|
@@ -1116,20 +1286,52 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
      "to indicate its liveness to the broker. If no heartbeats are "
      "received by the broker for a group member within the "
      "session timeout, the broker will remove the consumer from "
-     "the group and trigger a rebalance. "
-     "The allowed range is configured with the **broker** configuration "
+     "the group and trigger a rebalance. The "
+     "allowed range is configured with the **broker** configuration "
      "properties `group.min.session.timeout.ms` and "
-     "`group.max.session.timeout.ms`. "
+     "`group.max.session.timeout.ms`. `session.timeout.ms` is not supported "
+     "for `group.protocol=consumer`. It is set with the broker configuration "
+     "property "
+     "`group.consumer.session.timeout.ms` by default or can be configured "
+     "through the AdminClient IncrementalAlterConfigs API. "
+     "The allowed range is configured with the broker configuration "
+     "properties `group.consumer.min.session.timeout.ms` and "
+     "`group.consumer.max.session.timeout.ms`. "
      "Also see `max.poll.interval.ms`.",
      1, 3600 * 1000, 45 * 1000},
     {_RK_GLOBAL | _RK_CGRP, "heartbeat.interval.ms", _RK_C_INT,
      _RK(group_heartbeat_intvl_ms),
-     "Group session keepalive heartbeat interval.", 1, 3600 * 1000, 3 * 1000},
+     "Group session keepalive heartbeat interval. "
+     "`heartbeat.interval.ms` is not supported for `group.protocol=consumer`. "
+     "It is set with the broker configuration property "
+     "`group.consumer.heartbeat.interval.ms` by default or can be configured "
+     "through the AdminClient IncrementalAlterConfigs API. The allowed range "
+     "is configured with the broker configuration properties "
+     "`group.consumer.min.heartbeat.interval.ms` and "
+     "`group.consumer.max.heartbeat.interval.ms`.",
+     1, 3600 * 1000, 3 * 1000},
     {_RK_GLOBAL | _RK_CGRP, "group.protocol.type", _RK_C_KSTR,
      _RK(group_protocol_type),
-     "Group protocol type. NOTE: Currently, the only supported group "
-     "protocol type is `consumer`.",
+     "Group protocol type for the `classic` group protocol. NOTE: Currently, "
+     "the only supported group protocol type is `consumer`. "
+     "`group.protocol.type` is not supported for `group.protocol=consumer`",
      .sdef = "consumer"},
+    {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "group.protocol", _RK_C_S2I,
+     _RK(group_protocol),
+     "Group protocol to use. Use `classic` for the original protocol and "
+     "`consumer` for the new "
+     "protocol introduced in KIP-848. Available protocols: classic or "
+     "consumer. Default is `classic`, "
+     "but will change to `consumer` in next releases.",
+     .vdef = RD_KAFKA_GROUP_PROTOCOL_CLASSIC,
+     .s2i = {{RD_KAFKA_GROUP_PROTOCOL_CLASSIC, "classic"},
+             {RD_KAFKA_GROUP_PROTOCOL_CONSUMER, "consumer"}}},
+    {_RK_GLOBAL | _RK_CGRP | _RK_MED, "group.remote.assignor", _RK_C_STR,
+     _RK(group_remote_assignor),
+     "Server side assignor to use. Keep it null to make server select a "
+     "suitable assignor for the group. "
+     "Available assignors: uniform or range. Default is null",
+     .sdef = NULL},
     {_RK_GLOBAL | _RK_CGRP, "coordinator.query.interval.ms", _RK_C_INT,
      _RK(coord_query_intvl_ms),
      "How often to query for the current client group coordinator. "
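
The new `group.protocol` and `group.remote.assignor` entries above are ordinary string properties, so they are set through the regular librdkafka configuration API. A minimal sketch of opting in to the KIP-848 protocol (broker address and group name are placeholder values, not part of this change):

    #include <librdkafka/rdkafka.h>
    #include <stdio.h>

    int main(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            /* Opt in to the KIP-848 `consumer` group protocol and pick a
             * server-side assignor instead of partition.assignment.strategy. */
            if (rd_kafka_conf_set(conf, "group.protocol", "consumer", errstr,
                                  sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                rd_kafka_conf_set(conf, "group.remote.assignor", "uniform",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                rd_kafka_conf_set(conf, "group.id", "example-group", errstr,
                                  sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                                  errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "%s\n", errstr);
                    rd_kafka_conf_destroy(conf);
                    return 1;
            }

            /* rd_kafka_new() takes ownership of conf on success. */
            rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
                                          sizeof(errstr));
            if (!rk) {
                    fprintf(stderr, "%s\n", errstr);
                    return 1;
            }
            rd_kafka_destroy(rk);
            return 0;
    }
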
@@ -1197,6 +1399,16 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
      "Maximum time the broker may wait to fill the Fetch response "
      "with fetch.min.bytes of messages.",
      0, 300 * 1000, 500},
+    {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.queue.backoff.ms", _RK_C_INT,
+     _RK(fetch_queue_backoff_ms),
+     "How long to postpone the next fetch request for a "
+     "topic+partition in case the current fetch queue thresholds "
+     "(queued.min.messages or queued.max.messages.kbytes) have "
+     "been exceeded. "
+     "This property may need to be decreased if the queue thresholds are "
+     "set low and the application is experiencing long (~1s) delays "
+     "between messages. Low values may increase CPU utilization.",
+     0, 300 * 1000, 1000},
     {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.message.max.bytes", _RK_C_INT,
      _RK(fetch_msg_max_bytes),
      "Initial maximum number of bytes per topic+partition to request when "
@@ -1332,7 +1544,8 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
     {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.messages",
      _RK_C_INT, _RK(queue_buffering_max_msgs),
      "Maximum number of messages allowed on the producer queue. "
-     "This queue is shared by all topics and partitions. A value of 0 disables "
+     "This queue is shared by all topics and partitions. A value of 0 "
+     "disables "
      "this limit.",
      0, INT_MAX, 100000},
     {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.kbytes",
@@ -1360,10 +1573,21 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
      0, INT32_MAX, INT32_MAX},
     {_RK_GLOBAL | _RK_PRODUCER, "retries", _RK_C_ALIAS,
      .sdef = "message.send.max.retries"},
-    {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "retry.backoff.ms", _RK_C_INT,
-     _RK(retry_backoff_ms),
-     "The backoff time in milliseconds before retrying a protocol request.", 1,
-     300 * 1000, 100},
+
+    {_RK_GLOBAL | _RK_MED, "retry.backoff.ms", _RK_C_INT, _RK(retry_backoff_ms),
+     "The backoff time in milliseconds before retrying a protocol request, "
+     "this is the first backoff time, "
+     "and will be backed off exponentially until number of retries is "
+     "exhausted, and it's capped by retry.backoff.max.ms.",
+     1, 300 * 1000, 100},
+
+    {_RK_GLOBAL | _RK_MED, "retry.backoff.max.ms", _RK_C_INT,
+     _RK(retry_backoff_max_ms),
+     "The max backoff time in milliseconds before retrying a protocol "
+     "request, "
+     "this is the maximum backoff allowed for exponentially backed off "
+     "requests.",
+     1, 300 * 1000, 1000},
 
     {_RK_GLOBAL | _RK_PRODUCER, "queue.buffering.backpressure.threshold",
      _RK_C_INT, _RK(queue_backpressure_thres),
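
As the two property strings above describe, retries now back off exponentially from `retry.backoff.ms` up to the `retry.backoff.max.ms` cap. A minimal sketch of that growth, ignoring the jitter the client may also apply (this helper is illustrative only, not part of librdkafka):

    #include <stdint.h>

    /* Illustrative: backoff before the Nth retry, doubling from
     * retry.backoff.ms (default 100) and capped at retry.backoff.max.ms
     * (default 1000). */
    static int next_backoff_ms(int attempt, int backoff_ms, int backoff_max_ms) {
            if (attempt > 30) /* avoid shift overflow; cap applies anyway */
                    return backoff_max_ms;
            int64_t b = (int64_t)backoff_ms << attempt; /* backoff_ms * 2^n */
            return b > backoff_max_ms ? backoff_max_ms : (int)b;
    }
    /* With the defaults: 100, 200, 400, 800, 1000, 1000, ... ms */
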
@@ -1427,6 +1651,29 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
      "A higher value allows for more effective batching of these "
      "messages.",
      0, 900000, 10},
+    {_RK_GLOBAL, "client.dns.lookup", _RK_C_S2I, _RK(client_dns_lookup),
+     "Controls how the client uses DNS lookups. By default, when the lookup "
+     "returns multiple IP addresses for a hostname, they will all be "
+     "attempted "
+     "for connection before the connection is considered failed. This applies "
+     "to both bootstrap and advertised servers. If the value is set to "
+     "`resolve_canonical_bootstrap_servers_only`, each entry will be resolved "
+     "and expanded into a list of canonical names. "
+     "**WARNING**: `resolve_canonical_bootstrap_servers_only` "
+     "must only be used with `GSSAPI` (Kerberos) as `sasl.mechanism`, "
+     "as it's the only purpose of this configuration value. "
+     "**NOTE**: Default here is different from the Java client's default "
+     "behavior, which connects only to the first IP address returned for a "
+     "hostname. ",
+     .vdef = RD_KAFKA_USE_ALL_DNS_IPS,
+     .s2i = {{RD_KAFKA_USE_ALL_DNS_IPS, "use_all_dns_ips"},
+             {RD_KAFKA_RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY,
+              "resolve_canonical_bootstrap_servers_only"}}},
+    {_RK_GLOBAL, "enable.metrics.push", _RK_C_BOOL, _RK(enable_metrics_push),
+     "Whether to enable pushing of client metrics to the cluster, if the "
+     "cluster has a client metrics subscription which matches this client",
+     0, 1, 1},
+
 
 
     /*
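
Both new globals above are ordinary `rd_kafka_conf_set()` properties. A short sketch under the constraints stated in the descriptions (values are examples only; `enable.metrics.push` corresponds to the KIP-714 client-metrics feature):

    #include <librdkafka/rdkafka.h>

    static void set_dns_and_metrics(rd_kafka_conf_t *conf,
                                    char *errstr, size_t errstr_size) {
            /* Per the WARNING above, only meaningful together with
             * sasl.mechanism=GSSAPI (Kerberos): */
            rd_kafka_conf_set(conf, "client.dns.lookup",
                              "resolve_canonical_bootstrap_servers_only",
                              errstr, errstr_size);
            /* Client metrics push is on by default; opt out explicitly: */
            rd_kafka_conf_set(conf, "enable.metrics.push", "false",
                              errstr, errstr_size);
    }
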
@@ -1493,7 +1740,8 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
      "`murmur2_random` - Java Producer compatible Murmur2 hash of key "
      "(NULL keys are randomly partitioned. This is functionally equivalent "
      "to the default partitioner in the Java Producer.), "
-     "`fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), "
+     "`fnv1a` - FNV-1a hash of key (NULL keys are mapped to single "
+     "partition), "
      "`fnv1a_random` - FNV-1a hash of key (NULL keys are randomly "
      "partitioned).",
      .sdef = "consistent_random",
@@ -2263,7 +2511,7 @@ static int rd_kafka_anyconf_set(int scope,
                 const struct rd_kafka_property *_prop;                        \
                 rd_kafka_conf_res_t _res;                                     \
                 _prop = rd_kafka_conf_prop_find(SCOPE, NAME);                 \
-                rd_assert(_prop && * "invalid property name");                \
+                rd_assert(_prop && *"invalid property name");                 \
                 _res = rd_kafka_anyconf_set_prop(                             \
                     SCOPE, CONF, _prop, (const void *)VALUE,                  \
                     1 /*allow-specifics*/, NULL, 0);                          \
@@ -3711,10 +3959,33 @@ const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
         if (conf->ssl.ca && (conf->ssl.ca_location || conf->ssl.ca_pem))
                 return "`ssl.ca.location` or `ssl.ca.pem`, and memory-based "
                        "set_ssl_cert(CERT_CA) are mutually exclusive.";
+
+#if WITH_OAUTHBEARER_OIDC
+        if (conf->https.ca_location && conf->https.ca_pem)
+                return "`https.ca.location` and `https.ca.pem` "
+                       "are mutually exclusive";
+
+        if (conf->https.ca_location &&
+            rd_strcmp(conf->https.ca_location, "probe") &&
+            !rd_file_stat(conf->https.ca_location, NULL))
+                return "`https.ca.location` must be "
+                       "an existing file or directory";
+
+#if !CURL_AT_LEAST_VERSION(7, 77, 0)
+        if (conf->https.ca_pem)
+                return "`https.ca.pem` requires libcurl 7.77.0 or later";
+#endif
+#endif
+
 
 #ifdef __APPLE__
         else if (!conf->ssl.ca && !conf->ssl.ca_location && !conf->ssl.ca_pem)
                 /* Default ssl.ca.location to 'probe' on OSX */
                 rd_kafka_conf_set(conf, "ssl.ca.location", "probe", NULL, 0);
+
+        /* Default https.ca.location to 'probe' on OSX */
+        if (!conf->https.ca_location && !conf->https.ca_pem)
+                rd_kafka_conf_set(conf, "https.ca.location", "probe", NULL, 0);
 #endif
 #endif
+
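
The `https.*` checks above guard the CA configuration used when the client fetches OIDC tokens over HTTPS. A hedged sketch of setting them (the path is a placeholder; the property names and the "probe"/existing-path semantics come from the checks above):

    #include <librdkafka/rdkafka.h>

    static void set_oidc_https_ca(rd_kafka_conf_t *conf,
                                  char *errstr, size_t errstr_size) {
            /* Must name an existing file or directory, or be the special
             * value "probe" (the default on OSX per the block above): */
            rd_kafka_conf_set(conf, "https.ca.location", "/etc/ssl/certs",
                              errstr, errstr_size);
            /* Alternatively, and mutually exclusive with the above
             * (requires libcurl >= 7.77.0):
             * rd_kafka_conf_set(conf, "https.ca.pem", "<PEM data>",
             *                   errstr, errstr_size); */
    }
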
@@ -3734,7 +4005,17 @@ const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
                                "mutually exclusive";
 
                 if (conf->sasl.oauthbearer.method ==
-                    RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) {
+                        RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
+                    !conf->sasl.oauthbearer.token_endpoint_url) {
+                        return "`sasl.oauthbearer.token.endpoint.url` "
+                               "is mandatory when "
+                               "`sasl.oauthbearer.method=oidc` is set";
+                }
+
+                if (conf->sasl.oauthbearer.method ==
+                        RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
+                    conf->sasl.oauthbearer.grant_type ==
+                        RD_KAFKA_SASL_OAUTHBEARER_GRANT_TYPE_CLIENT_CREDENTIALS) {
                         if (!conf->sasl.oauthbearer.client_id)
                                 return "`sasl.oauthbearer.client.id` is "
                                        "mandatory when "
@@ -3745,14 +4026,150 @@ const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
                                        "mandatory when "
                                        "`sasl.oauthbearer.method=oidc` is set";
                         }
+                }
+                if (conf->sasl.oauthbearer.method ==
+                        RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
+                    conf->sasl.oauthbearer.grant_type ==
+                        RD_KAFKA_SASL_OAUTHBEARER_GRANT_TYPE_JWT_BEARER) {
+                        if (conf->sasl.oauthbearer.assertion.file) {
+                                if (conf->sasl.oauthbearer.assertion.private_key
+                                        .file)
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "private."
+                                               "key.file` cannot both be set";
 
-                        if (!conf->sasl.oauthbearer.token_endpoint_url) {
-                                return "`sasl.oauthbearer.token.endpoint.url` "
-                                       "is mandatory when "
-                                       "`sasl.oauthbearer.method=oidc` is set";
+                                if (conf->sasl.oauthbearer.assertion.private_key
+                                        .pem)
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "private."
+                                               "key.pem` cannot both be set";
+
+                                if (conf->sasl.oauthbearer.assertion.private_key
+                                        .passphrase)
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "private."
+                                               "key.passphrase` cannot both be "
+                                               "set";
+
+                                if (conf->sasl.oauthbearer.assertion
+                                        .jwt_template_file)
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "jwt.template.file` cannot both "
+                                               "be set";
+
+                                if (conf->sasl.oauthbearer.assertion.claim
+                                        .subject)
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "claim.sub` cannot both be set";
+
+                                if (conf->sasl.oauthbearer.assertion.claim
+                                        .audience)
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "claim.aud` cannot both be set";
+
+                                if (conf->sasl.oauthbearer.assertion.claim
+                                        .issuer)
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "claim.iss` cannot both be set";
+
+                                if (rd_kafka_conf_is_modified(
+                                        conf,
+                                        "sasl.oauthbearer."
+                                        "assertion.claim.jti.include"))
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "claim.jti.include` cannot both "
+                                               "be set";
+
+                                if (rd_kafka_conf_is_modified(
+                                        conf,
+                                        "sasl.oauthbearer."
+                                        "assertion.claim.exp.seconds"))
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "claim.exp.seconds` cannot both "
+                                               "be set";
+
+
+                                if (rd_kafka_conf_is_modified(
+                                        conf,
+                                        "sasl.oauthbearer."
+                                        "assertion.claim.nbf.seconds"))
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "claim.nbf.seconds` cannot both "
+                                               "be set";
+                        } else {
+                                if (conf->sasl.oauthbearer.assertion.private_key
+                                        .file &&
+                                    conf->sasl.oauthbearer.assertion.private_key
+                                        .pem)
+                                        return "Mutually exclusive properties "
+                                               "set. "
+                                               "`sasl.oauthbearer.assertion."
+                                               "private."
+                                               "key.file` and "
+                                               "`sasl.oauthbearer.assertion."
+                                               "private."
+                                               "key.pem` cannot both be set";
+
+                                if (!conf->sasl.oauthbearer.assertion
+                                        .private_key.file &&
+                                    !conf->sasl.oauthbearer.assertion
+                                        .private_key.pem)
+                                        return "`sasl.oauthbearer.assertion."
+                                               "private."
+                                               "key.file` or "
+                                               "`sasl.oauthbearer.assertion."
+                                               "private."
+                                               "key.pem` is mandatory when "
+                                               "`sasl.oauthbearer.grant.type` "
+                                               "is set to "
+                                               "`urn:ietf:params:oauth:grant-"
+                                               "type:jwt-"
+                                               "bearer`";
                         }
                 }
+
+
 
                 /* Enable background thread for the builtin OIDC handler,
                  * unless a refresh callback has been set. */
                 if (conf->sasl.oauthbearer.method ==
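
Taken together, the validation above distinguishes the two OIDC grant types. A hedged configuration sketch for the JWT-bearer path (endpoint URL and file paths are placeholders; the property names are exactly those named in the error strings above):

    #include <librdkafka/rdkafka.h>
    #include <stddef.h>

    static rd_kafka_conf_t *oidc_jwt_bearer_conf(char *errstr,
                                                 size_t errstr_size) {
            static const char *props[][2] = {
                {"security.protocol", "SASL_SSL"},
                {"sasl.mechanism", "OAUTHBEARER"},
                {"sasl.oauthbearer.method", "oidc"},
                {"sasl.oauthbearer.token.endpoint.url",
                 "https://idp.example.com/token"},
                {"sasl.oauthbearer.grant.type",
                 "urn:ietf:params:oauth:grant-type:jwt-bearer"},
                /* Either a pre-built assertion file (mutually exclusive with
                 * every other assertion property checked above)... */
                {"sasl.oauthbearer.assertion.file", "/etc/kafka/assertion.jwt"},
                /* ...or a private key for client-built assertions:
                 * {"sasl.oauthbearer.assertion.private.key.file",
                 *  "/etc/kafka/key.pem"}, */
            };
            size_t i;
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            for (i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
                    if (rd_kafka_conf_set(conf, props[i][0], props[i][1],
                                          errstr, errstr_size) !=
                        RD_KAFKA_CONF_OK) {
                            rd_kafka_conf_destroy(conf);
                            return NULL;
                    }
            }
            return conf;
    }
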
@@ -3767,6 +4184,43 @@ const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
 
         if (cltype == RD_KAFKA_CONSUMER) {
 
+                if (conf->group_protocol == RD_KAFKA_GROUP_PROTOCOL_CLASSIC) {
+                        if (conf->max_poll_interval_ms <
+                            conf->group_session_timeout_ms)
+                                return "`max.poll.interval.ms` must be >= "
+                                       "`session.timeout.ms`";
+                } else {
+
+                        if (rd_kafka_conf_is_modified(conf,
+                                                      "session.timeout.ms")) {
+                                return "`session.timeout.ms` is not supported "
+                                       "for `group.protocol=consumer`. It is "
+                                       "defined broker side";
+                        }
+
+                        if (rd_kafka_conf_is_modified(
+                                conf, "partition.assignment.strategy")) {
+                                return "`partition.assignment.strategy` is not "
+                                       "supported for "
+                                       "`group.protocol=consumer`. Use "
+                                       "`group.remote.assignor` instead";
+                        }
+
+                        if (rd_kafka_conf_is_modified(conf,
+                                                      "group.protocol.type")) {
+                                return "`group.protocol.type` is not supported "
+                                       "for `group.protocol=consumer`";
+                        }
+
+                        if (rd_kafka_conf_is_modified(
+                                conf, "heartbeat.interval.ms")) {
+                                return "`heartbeat.interval.ms` is not "
+                                       "supported "
+                                       "for `group.protocol=consumer`. It is "
+                                       "defined broker side";
+                        }
+                }
+
                 /* Automatically adjust `fetch.max.bytes` to be >=
                  * `message.max.bytes` and <= `queued.max.message.kbytes`
                  * unless set by user. */
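
These checks fire at finalize time rather than at `rd_kafka_conf_set()` time, so a classic-only property combined with `group.protocol=consumer` is only rejected when the client handle is created. A minimal sketch of that failure mode (broker-free, so it never connects):

    #include <librdkafka/rdkafka.h>
    #include <stdio.h>

    int main(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            rd_kafka_conf_set(conf, "group.id", "g1", errstr, sizeof(errstr));
            rd_kafka_conf_set(conf, "group.protocol", "consumer", errstr,
                              sizeof(errstr));
            /* Accepted by conf_set(), but classic-protocol only: */
            rd_kafka_conf_set(conf, "session.timeout.ms", "30000", errstr,
                              sizeof(errstr));

            /* rd_kafka_new() runs rd_kafka_conf_finalize(), which rejects
             * the combination with the error string added above. */
            rd_kafka_t *rk =
                rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
            if (!rk) {
                    fprintf(stderr, "%s\n", errstr);
                    return 1;
            }
            rd_kafka_destroy(rk);
            return 0;
    }
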
@@ -3797,10 +4251,6 @@ const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
                             conf->fetch_max_bytes + 512);
         }
 
-        if (conf->max_poll_interval_ms < conf->group_session_timeout_ms)
-                return "`max.poll.interval.ms`must be >= "
-                       "`session.timeout.ms`";
-
         /* Simplifies rd_kafka_is_idempotent() which is producer-only */
         conf->eos.idempotence = 0;
 
@@ -3895,7 +4345,7 @@ const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
 
         if (conf->reconnect_backoff_max_ms < conf->reconnect_backoff_ms)
                 return "`reconnect.backoff.max.ms` must be >= "
-                       "`reconnect.max.ms`";
+                       "`reconnect.backoff.ms`";
 
         if (conf->sparse_connections) {
                 /* Set sparse connection random selection interval to
@@ -3903,6 +4353,10 @@ const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
                 conf->sparse_connect_intvl =
                     RD_MAX(11, RD_MIN(conf->reconnect_backoff_ms / 2, 1000));
         }
+        if (!rd_kafka_conf_is_modified(
+                conf, "topic.metadata.refresh.fast.interval.ms"))
+                conf->metadata_refresh_fast_interval_ms =
+                    conf->retry_backoff_ms;
 
         if (!rd_kafka_conf_is_modified(conf, "connections.max.idle.ms") &&
             conf->brokerlist && rd_strcasestr(conf->brokerlist, "azure")) {
@@ -4091,6 +4545,31 @@ int rd_kafka_conf_warn(rd_kafka_t *rk) {
                              "recommend not using set_default_topic_conf");
 
         /* Additional warnings */
+        if (rk->rk_conf.retry_backoff_ms > rk->rk_conf.retry_backoff_max_ms) {
+                rd_kafka_log(
+                    rk, LOG_WARNING, "CONFWARN",
+                    "Configuration `retry.backoff.ms` with value %d is greater "
+                    "than configuration `retry.backoff.max.ms` with value %d. "
+                    "A static backoff with value `retry.backoff.max.ms` will "
+                    "be applied.",
+                    rk->rk_conf.retry_backoff_ms,
+                    rk->rk_conf.retry_backoff_max_ms);
+        }
+
+        if (rd_kafka_conf_is_modified(
+                &rk->rk_conf, "topic.metadata.refresh.fast.interval.ms") &&
+            rk->rk_conf.metadata_refresh_fast_interval_ms >
+                rk->rk_conf.retry_backoff_max_ms) {
+                rd_kafka_log(
+                    rk, LOG_WARNING, "CONFWARN",
+                    "Configuration `topic.metadata.refresh.fast.interval.ms` "
+                    "with value %d is greater than configuration "
+                    "`retry.backoff.max.ms` with value %d. "
+                    "A static backoff with value `retry.backoff.max.ms` will "
+                    "be applied.",
+                    rk->rk_conf.metadata_refresh_fast_interval_ms,
+                    rk->rk_conf.retry_backoff_max_ms);
+        }
         if (rk->rk_type == RD_KAFKA_CONSUMER) {
                 if (rk->rk_conf.fetch_wait_max_ms + 1000 >
                     rk->rk_conf.socket_timeout_ms)
@@ -1,7 +1,8 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2014-2018 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
+ *               2023, Confluent Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -33,7 +34,7 @@
 #include "rdkafka_cert.h"
 
 #if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 && \
-    !defined(OPENSSL_IS_BORINGSSL)
+    !defined(OPENSSL_NO_ENGINE)
 #define WITH_SSL_ENGINE 1
 /* Deprecated in OpenSSL 3 */
 #include <openssl/engine.h>
@@ -150,17 +151,42 @@ typedef enum {
 
 typedef enum {
         RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT,
-        RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC
+        RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC,
 } rd_kafka_oauthbearer_method_t;
 
+typedef enum {
+        RD_KAFKA_SASL_OAUTHBEARER_GRANT_TYPE_CLIENT_CREDENTIALS,
+        RD_KAFKA_SASL_OAUTHBEARER_GRANT_TYPE_JWT_BEARER,
+} rd_kafka_oauthbearer_grant_type_t;
+
+typedef enum {
+        RD_KAFKA_SASL_OAUTHBEARER_ASSERTION_ALGORITHM_RS256,
+        RD_KAFKA_SASL_OAUTHBEARER_ASSERTION_ALGORITHM_ES256,
+} rd_kafka_oauthbearer_assertion_algorithm_t;
+
 typedef enum {
         RD_KAFKA_SSL_ENDPOINT_ID_NONE,
         RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, /**< RFC2818 */
 } rd_kafka_ssl_endpoint_id_t;
 
+typedef enum {
+        RD_KAFKA_USE_ALL_DNS_IPS,
+        RD_KAFKA_RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY,
+} rd_kafka_client_dns_lookup_t;
+
+typedef enum {
+        RD_KAFKA_GROUP_PROTOCOL_CLASSIC,
+        RD_KAFKA_GROUP_PROTOCOL_CONSUMER,
+} rd_kafka_group_protocol_t;
+
+typedef enum {
+        RD_KAFKA_METADATA_RECOVERY_STRATEGY_NONE,
+        RD_KAFKA_METADATA_RECOVERY_STRATEGY_REBOOTSTRAP,
+} rd_kafka_metadata_recovery_strategy_t;
+
 /* Increase in steps of 64 as needed.
  * This must be larger than sizeof(rd_kafka_[topic_]conf_t) */
-#define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 33)
+#define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 35)
 
 /**
  * @struct rd_kafka_anyconf_t
@@ -191,6 +217,7 @@ struct rd_kafka_conf_s {
         int msg_copy_max_size;
         int recv_max_msg_size;
         int max_inflight;
+        int metadata_recovery_rebootstrap_trigger_ms;
         int metadata_request_timeout_ms;
         int metadata_refresh_interval_ms;
         int metadata_refresh_fast_cnt;
@@ -224,6 +251,8 @@ struct rd_kafka_conf_s {
         int api_version_fallback_ms;
         char *broker_version_fallback;
         rd_kafka_secproto_t security_protocol;
+        rd_kafka_client_dns_lookup_t client_dns_lookup;
+        rd_kafka_metadata_recovery_strategy_t metadata_recovery_strategy;
 
         struct {
 #if WITH_SSL
@@ -269,6 +298,11 @@ struct rd_kafka_conf_s {
                          void *opaque);
         } ssl;
 
+        struct {
+                char *ca_location;
+                char *ca_pem;
+        } https;
+
         struct {
                 const struct rd_kafka_sasl_provider *provider;
                 char *principal;
@@ -298,10 +332,33 @@ struct rd_kafka_conf_s {
         int enable_callback_queue;
         struct {
                 rd_kafka_oauthbearer_method_t method;
+                rd_kafka_oauthbearer_grant_type_t grant_type;
                 char *token_endpoint_url;
                 char *client_id;
                 char *client_secret;
                 char *scope;
+                struct {
+                        rd_kafka_oauthbearer_assertion_algorithm_t
+                            algorithm;
+                        char *file;
+                        char *jwt_template_file;
+
+                        struct {
+                                char *subject;
+                                char *audience;
+                                char *issuer;
+                                rd_bool_t jti_include;
+                                int not_before_s;
+                                int expiration_s;
+                        } claim;
+                        struct {
+                                char *file;
+                                char *passphrase;
+                                char *pem;
+                        } private_key;
+
+                } assertion;
+
                 char *extensions_str;
                 /* SASL/OAUTHBEARER token refresh event callback */
                 void (*token_refresh_cb)(rd_kafka_t *rk,
@@ -342,6 +399,7 @@ struct rd_kafka_conf_s {
         /* Client group configuration */
         int coord_query_intvl_ms;
         int max_poll_interval_ms;
+        int enable_metrics_push;
 
         int builtin_features;
         /*
@@ -355,9 +413,12 @@ struct rd_kafka_conf_s {
         int fetch_msg_max_bytes;
         int fetch_max_bytes;
         int fetch_min_bytes;
+        int fetch_queue_backoff_ms;
         int fetch_error_backoff_ms;
+        rd_kafka_group_protocol_t group_protocol;
         char *group_id_str;
         char *group_instance_id;
+        char *group_remote_assignor;
         int allow_auto_create_topics;
 
         rd_kafka_pattern_list_t *topic_blacklist;
@@ -416,6 +477,7 @@ struct rd_kafka_conf_s {
         int queue_backpressure_thres;
         int max_retries;
         int retry_backoff_ms;
+        int retry_backoff_max_ms;
         int batch_num_messages;
         int batch_size;
         rd_kafka_compression_t compression_codec;
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2014-2018 Magnus Edenhill
+ * Copyright (c) 2014-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2019 Magnus Edenhill
+ * Copyright (c) 2019-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2019 Magnus Edenhill
+ * Copyright (c) 2019-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - The Apache Kafka C/C++ library
  *
- * Copyright (c) 2020 Magnus Edenhill
+ * Copyright (c) 2020-2022, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
Some files were not shown because too many files have changed in this diff.