mirror of https://github.com/mongodb/mongo
SERVER-108005 [streams] 1) Revert librdkafka upgrade to v2.6.0 2) Revert splitting up of streams_kafka suite 3) Remove streams_kafka from burn_in_tests (#39274)
GitOrigin-RevId: 2dab3267e1e259306a6d7ea93027a21a87434aeb
This commit is contained in:
parent 8d874b9344
commit 7a8253469a
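The hunks below touch every place the vendored librdkafka version appears (third-party table, SBOM, Bazel sources, vendored headers and configs). As a sanity check after a rebuild, a minimal sketch like the following (not part of this commit; it only assumes the public `RdKafka::version()`/`RdKafka::version_str()` API, which exists in both 2.0.2 and 2.6.0) prints the version actually linked in:

```cpp
// Sketch: confirm which librdkafka a binary actually linked against.
// RdKafka::version() returns the hex-encoded version (0xMMmmrrxx);
// RdKafka::version_str() returns it as a human-readable string.
#include <librdkafka/rdkafkacpp.h>

#include <cstdio>

int main() {
  std::printf("librdkafka %s (0x%08x)\n", RdKafka::version_str().c_str(),
              (unsigned)RdKafka::version());
  return 0;  // expect "librdkafka 2.0.2" after this revert
}
```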
@@ -47,7 +47,7 @@ a notice will be included in
 | [jbeder/yaml-cpp] | MIT | 0.6.3 | | ✗ |
 | [JSON-Schema-Test-Suite] | Unknown License | Unknown | | |
 | [libmongocrypt] | Apache-2.0 | 1.14.0 | ✗ | ✗ |
-| [librdkafka - the Apache Kafka C/C++ client library] | BSD-3-Clause, Xmlproc License, ISC, MIT, Public Domain, Zlib, BSD-2-Clause, Andreas Stolcke License | 2.6.0 | | ✗ |
+| [librdkafka - the Apache Kafka C/C++ client library] | BSD-3-Clause, Xmlproc License, ISC, MIT, Public Domain, Zlib, BSD-2-Clause, Andreas Stolcke License | 2.0.2 | | ✗ |
 | [LibTomCrypt] | WTFPL, Public Domain | 1.18.2 | ✗ | ✗ |
 | [libunwind/libunwind] | MIT | v1.8.1 | | ✗ |
 | [linenoise] | BSD-2-Clause | Unknown | | ✗ |
@@ -3,8 +3,6 @@ test_kind: js_test
 selector:
   roots:
     - src/mongo/db/modules/*/jstests/streams_kafka/*.js
-  exclude_files:
-    - src/mongo/db/modules/enterprise/jstests/streams_kafka/kafka_utils.js
 
 executor:
   fixture:
@@ -5,7 +5,8 @@ selector:
 exclude_suites:
   # Requires an HTTP server to be running in the background.
   - queryable_wt
-  # Each suite needs its own kafka broker running
+  # When splitting into multiple suites, each suite will need
+  # its own broker
   - streams_kafka
 # Exclude list of etc/evergreen.yml task names.
 exclude_tasks:
@@ -157,7 +157,7 @@ rules:
     forbidden_task_tag: "experimental"
     ignored_tasks: [
         # TODO: remove this exception when SERVER-94572 is resolved
-        "streams_kafka_gen",
+        "streams_kafka",
         "streams_kafka_gwproxy",
         "streams_lambda",
         "streams_s3",
@@ -293,14 +293,12 @@ tasks:
       vars:
         fallback_num_sub_suites: 2
 
-- <<: *gen_task_template
-  name: streams_kafka_gen
+- <<: *task_template
+  name: streams_kafka
   tags: ["assigned_to_jira_team_streams", "experimental"]
   commands:
-    - func: "generate resmoke tasks"
-      vars:
-        fallback_num_sub_suites: 2
-        resmoke_jobs_max: 1
+    - func: "do setup"
+    - func: "run tests"
 
 - <<: *task_template
   name: streams_kafka_gwproxy
@@ -515,7 +515,7 @@ buildvariants:
           distros:
             - amazon2023-latest-large
       - name: streams_gen
-      - name: streams_kafka_gen
+      - name: streams_kafka
       - name: streams_kafka_gwproxy
       - name: streams_kafka_benchmark
       - name: streams_https
@@ -565,7 +565,7 @@ buildvariants:
           distros:
             - amazon2-latest-large
       - name: streams_gen
-      - name: streams_kafka_gen
+      - name: streams_kafka
       - name: streams_kafka_gwproxy
       - name: streams_kafka_benchmark
       - name: streams_https
@@ -615,7 +615,7 @@ buildvariants:
           distros:
             - amazon2-arm64-latest-large
       - name: streams_gen
-      - name: streams_kafka_gen
+      - name: streams_kafka
       # TODO(SERVER-103985) - Enabling this is blocked on SRE support arm64 in the sre/gwproxy image (SRE-1481)
       # - name: streams_kafka_gwproxy
       - name: streams_kafka_benchmark
@@ -314,7 +314,7 @@ buildvariants:
       tags: ["suggested"]
       tasks:
         &experimental-task-list # TODO(SERVER-90936): Remove streams_kafka* and streams_lambda tests when they work with the "default" tag.
-        - name: streams_kafka_gen
+        - name: streams_kafka
         - name: streams_kafka_gwproxy
         - name: streams_lambda
         - name: streams_s3
@@ -1212,14 +1212,14 @@
     },
     {
       "type": "library",
-      "bom-ref": "pkg:github/confluentinc/librdkafka@v2.6.0",
+      "bom-ref": "pkg:github/confluentinc/librdkafka@v2.0.2",
       "supplier": {
         "name": "Confluent Inc."
       },
       "author": "Magnus Edenhill",
       "group": "confluentinc",
       "name": "librdkafka - The Apache Kafka C/C++ library",
-      "version": "2.6.0",
+      "version": "2.0.2",
       "licenses": [
         {
           "license": {
@@ -1228,8 +1228,8 @@
         }
       ],
       "copyright": "Copyright (c) 2012-2022, Magnus Edenhill; 2023, Confluent Inc.",
-      "cpe": "cpe:2.3:a:confluent:librdkafka:2.6.0:*:*:*:*:*:*:*",
-      "purl": "pkg:github/confluentinc/librdkafka@v2.6.0",
+      "cpe": "cpe:2.3:a:confluent:librdkafka:2.0.2:*:*:*:*:*:*:*",
+      "purl": "pkg:github/confluentinc/librdkafka@v2.0.2",
       "properties": [
         {
           "name": "internal:team_responsible",
@@ -40,15 +40,8 @@ mongo_cc_library(
         "dist/src/lz4.c",
         "dist/src/lz4frame.c",
         "dist/src/lz4hc.c",
-        "dist/src/nanopb/pb_common.c",
-        "dist/src/nanopb/pb_decode.c",
-        "dist/src/nanopb/pb_encode.c",
-        "dist/src/opentelemetry/common.pb.c",
-        "dist/src/opentelemetry/metrics.pb.c",
-        "dist/src/opentelemetry/resource.pb.c",
         "dist/src/rdaddr.c",
         "dist/src/rdavl.c",
-        "dist/src/rdbase64.c",
         "dist/src/rdbuf.c",
         "dist/src/rdcrc32.c",
         "dist/src/rddl.c",
@@ -102,9 +95,6 @@ mongo_cc_library(
         "dist/src/rdkafka_ssl.c",
         "dist/src/rdkafka_sticky_assignor.c",
         "dist/src/rdkafka_subscription.c",
-        "dist/src/rdkafka_telemetry.c",
-        "dist/src/rdkafka_telemetry_decode.c",
-        "dist/src/rdkafka_telemetry_encode.c",
         "dist/src/rdkafka_timer.c",
         "dist/src/rdkafka_topic.c",
         "dist/src/rdkafka_transport.c",
@@ -137,11 +127,7 @@ mongo_cc_library(
     copts = [
-        "-Wno-array-bounds",
-        "-Wno-unused-variable",
-        "-Wno-enum-conversion",
-        "-Wno-format-truncation",
         "-Wno-implicit-fallthrough",
         "-Wno-implicit-function-declaration",
         "-Wno-int-conversion",
         "-Wno-unused-but-set-variable",
         "-I$(GENDIR)/src/third_party/librdkafka/dist/FAKE",
         "-U_GNU_SOURCE",
@@ -1,7 +1,6 @@
 librdkafka - Apache Kafka C driver library
 
-Copyright (c) 2012-2022, Magnus Edenhill
-              2023, Confluent Inc.
+Copyright (c) 2012-2020, Magnus Edenhill
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
-src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git 5ff839680134437dbf4678f3d0c7b371d84f4964
+src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
 
 LZ4 Library
-Copyright (c) 2011-2020, Yann Collet
+Copyright (c) 2011-2016, Yann Collet
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification,
@@ -1,22 +0,0 @@
-For files in src/nanopb : https://github.com/nanopb/nanopb/blob/8ef41e0ebd45daaf19459a011f67e66224b247cd/LICENSE.txt
-
-Copyright (c) 2011 Petteri Aimonen <jpa at nanopb.mail.kapsi.fi>
-
-This software is provided 'as-is', without any express or
-implied warranty. In no event will the authors be held liable
-for any damages arising from the use of this software.
-
-Permission is granted to anyone to use this software for any
-purpose, including commercial applications, and to alter it and
-redistribute it freely, subject to the following restrictions:
-
-1. The origin of this software must not be misrepresented; you
-   must not claim that you wrote the original software. If you use
-   this software in a product, an acknowledgment in the product
-   documentation would be appreciated but is not required.
-
-2. Altered source versions must be plainly marked as such, and
-   must not be misrepresented as being the original software.
-
-3. This notice may not be removed or altered from any source
-   distribution.
@@ -1,203 +0,0 @@
-For files in src/opentelemetry: https://github.com/open-telemetry/opentelemetry-proto/blob/81a296f9dba23e32d77f46d58c8ea4244a2157a6/LICENSE
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
@@ -2,8 +2,7 @@ LICENSE
 --------------------------------------------------------------
 librdkafka - Apache Kafka C driver library
 
-Copyright (c) 2012-2022, Magnus Edenhill
-              2023, Confluent Inc.
+Copyright (c) 2012-2020, Magnus Edenhill
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -141,10 +140,10 @@ THE SOFTWARE
 
 LICENSE.lz4
 --------------------------------------------------------------
-src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git 5ff839680134437dbf4678f3d0c7b371d84f4964
+src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
 
 LZ4 Library
-Copyright (c) 2011-2020, Yann Collet
+Copyright (c) 2011-2016, Yann Collet
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification,
@@ -198,238 +197,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
 
-LICENSE.nanopb
---------------------------------------------------------------
-For files in src/nanopb : https://github.com/nanopb/nanopb/blob/8ef41e0ebd45daaf19459a011f67e66224b247cd/LICENSE.txt
-
-Copyright (c) 2011 Petteri Aimonen <jpa at nanopb.mail.kapsi.fi>
-
-This software is provided 'as-is', without any express or
-implied warranty. In no event will the authors be held liable
-for any damages arising from the use of this software.
-
-Permission is granted to anyone to use this software for any
-purpose, including commercial applications, and to alter it and
-redistribute it freely, subject to the following restrictions:
-
-1. The origin of this software must not be misrepresented; you
-   must not claim that you wrote the original software. If you use
-   this software in a product, an acknowledgment in the product
-   documentation would be appreciated but is not required.
-
-2. Altered source versions must be plainly marked as such, and
-   must not be misrepresented as being the original software.
-
-3. This notice may not be removed or altered from any source
-   distribution.
-
-
-LICENSE.opentelemetry
---------------------------------------------------------------
-For files in src/opentelemetry: https://github.com/open-telemetry/opentelemetry-proto/blob/81a296f9dba23e32d77f46d58c8ea4244a2157a6/LICENSE
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
 LICENSE.pycrc
 --------------------------------------------------------------
 The following license applies to the files rdcrc32.c and rdcrc32.h which
@@ -18,7 +18,7 @@
 #define ENABLE_LZ4_EXT 1
 #define ENABLE_LZ4_EXT 1
 #define ENABLE_REGEX_EXT 1
-#define ENABLE_C11THREADS 0
+#define ENABLE_C11THREADS "try"
 #define ENABLE_SYSLOG 1
 #define MKL_APP_NAME "librdkafka"
 #define MKL_APP_DESC_ONELINE "The Apache Kafka C/C++ library"
@@ -26,6 +26,8 @@
 #define WITH_GCC 1
 // gxx
 #define WITH_GXX 1
+// pkgconfig
+#define WITH_PKGCONFIG 1
 // install
 #define WITH_INSTALL 1
 // gnuar
@@ -49,21 +51,21 @@
 // atomic_64
 #define ATOMIC_OP(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
 // parseversion
-#define RDKAFKA_VERSION_STR "2.6.0"
+#define RDKAFKA_VERSION_STR "2.0.2"
 // parseversion
-#define MKL_APP_VERSION "2.6.0"
-// c11threads
-#define WITH_C11THREADS 0
+#define MKL_APP_VERSION "2.0.2"
 // libdl
 #define WITH_LIBDL 1
 // WITH_PLUGINS
 #define WITH_PLUGINS 1
+// zlib
+#define WITH_ZLIB 1
 // libssl
 #define WITH_SSL 1
-// libcrypto
-#define OPENSSL_SUPPRESS_DEPRECATED "OPENSSL_SUPPRESS_DEPRECATED"
 // libsasl2
 #define WITH_SASL_CYRUS 1
+// libzstd
+#define WITH_ZSTD 1
 // libcurl
 #define WITH_CURL 1
 // WITH_HDRHISTOGRAM
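The `ATOMIC_OP` macro kept as context above is pure token pasting. As an illustration (not from the diff, but standard C preprocessor and GCC/Clang `__atomic` builtin behavior), `ATOMIC_OP(fetch, add, &v, 1)` expands to `__atomic_fetch_add(&v, 1, __ATOMIC_SEQ_CST)`:

```cpp
// Illustration of the ATOMIC_OP token-pasting macro from the vendored
// config.h; requires GCC or Clang for the __atomic builtins.
#include <cstdint>

#define ATOMIC_OP(OP1, OP2, PTR, VAL) \
  __atomic_##OP1##_##OP2(PTR, VAL, __ATOMIC_SEQ_CST)

int main() {
  int64_t counter = 0;
  // Expands to __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST),
  // which returns the previous value (0 here).
  int64_t prev = ATOMIC_OP(fetch, add, &counter, 1);
  return (prev == 0 && counter == 1) ? 0 : 1;
}
```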
@@ -97,5 +99,5 @@
 // getrusage
 #define HAVE_GETRUSAGE 1
 // BUILT_WITH
-#define BUILT_WITH "GCC GXX INSTALL GNULD LDS C11THREADS LIBDL PLUGINS SSL SASL_CYRUS CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC"
+#define BUILT_WITH "GCC GXX PKGCONFIG INSTALL GNULD LDS C11THREADS LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC"
 #endif /* _CONFIG_H_ */
@@ -18,7 +18,7 @@
 #define ENABLE_LZ4_EXT 1
 #define ENABLE_LZ4_EXT 1
 #define ENABLE_REGEX_EXT 1
-#define ENABLE_C11THREADS 0
+#define ENABLE_C11THREADS "try"
 #define ENABLE_SYSLOG 1
 #define MKL_APP_NAME "librdkafka"
 #define MKL_APP_DESC_ONELINE "The Apache Kafka C/C++ library"
@@ -51,11 +51,9 @@
 // atomic_64
 #define ATOMIC_OP(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
 // parseversion
-#define RDKAFKA_VERSION_STR "2.6.0"
+#define RDKAFKA_VERSION_STR "2.0.2"
 // parseversion
-#define MKL_APP_VERSION "2.6.0"
-// c11threads
-#define WITH_C11THREADS 0
+#define MKL_APP_VERSION "2.0.2"
 // libdl
 #define WITH_LIBDL 1
 // WITH_PLUGINS
@@ -64,8 +62,6 @@
 #define WITH_ZLIB 1
 // libssl
 #define WITH_SSL 1
-// libcrypto
-#define OPENSSL_SUPPRESS_DEPRECATED "OPENSSL_SUPPRESS_DEPRECATED"
 // libsasl2
 #define WITH_SASL_CYRUS 1
 // libzstd
@@ -105,5 +101,5 @@
 // getrusage
 #define HAVE_GETRUSAGE 1
 // BUILT_WITH
-#define BUILT_WITH "GCC GXX PKGCONFIG INSTALL GNULD LDS C11THREADS LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC CRC32C_HW"
+#define BUILT_WITH "GCC GXX PKGCONFIG INSTALL GNULD LDS LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD CURL HDRHISTOGRAM SYSLOG SNAPPY SOCKEM SASL_SCRAM SASL_OAUTHBEARER OAUTHBEARER_OIDC CRC32C_HW"
 #endif /* _CONFIG_H_ */
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,8 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
- *               2023, Confluent Inc.
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -131,6 +130,7 @@ int RdKafka::socket_cb_trampoline(int domain,
   return handle->socket_cb_->socket_cb(domain, type, protocol);
 }
 
+
 int RdKafka::resolve_cb_trampoline(const char *node,
                                    const char *service,
                                    const struct addrinfo *hints,
@@ -152,6 +152,7 @@ int RdKafka::connect_cb_trampoline(int sockfd,
   return handle->connect_cb_->connect_cb(sockfd, addr, addrlen, id);
 }
 
+
 int RdKafka::open_cb_trampoline(const char *pathname,
                                 int flags,
                                 mode_t mode,
@@ -425,14 +426,6 @@ rd_kafka_topic_partition_list_t *partitions_to_c_parts(
     rd_kafka_topic_partition_t *rktpar = rd_kafka_topic_partition_list_add(
         c_parts, tpi->topic_.c_str(), tpi->partition_);
     rktpar->offset = tpi->offset_;
-    if (tpi->metadata_.size()) {
-      void *metadata_p = mem_malloc(tpi->metadata_.size());
-      memcpy(metadata_p, tpi->metadata_.data(), tpi->metadata_.size());
-      rktpar->metadata      = metadata_p;
-      rktpar->metadata_size = tpi->metadata_.size();
-    }
-    if (tpi->leader_epoch_ != -1)
-      rd_kafka_topic_partition_set_leader_epoch(rktpar, tpi->leader_epoch_);
   }
 
   return c_parts;
@@ -454,13 +447,8 @@ void update_partitions_from_c_parts(
         dynamic_cast<RdKafka::TopicPartitionImpl *>(partitions[j]);
     if (!strcmp(p->topic, pp->topic_.c_str()) &&
         p->partition == pp->partition_) {
-      pp->offset_       = p->offset;
-      pp->err_          = static_cast<RdKafka::ErrorCode>(p->err);
-      pp->leader_epoch_ = rd_kafka_topic_partition_get_leader_epoch(p);
-      if (p->metadata_size) {
-        unsigned char *metadata = (unsigned char *)p->metadata;
-        pp->metadata_.assign(metadata, metadata + p->metadata_size);
-      }
+      pp->offset_ = p->offset;
+      pp->err_    = static_cast<RdKafka::ErrorCode>(p->err);
     }
   }
 }
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2015-2022, Magnus Edenhill
+ * Copyright (c) 2015 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2015-2022, Magnus Edenhill
+ * Copyright (c) 2015 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1,8 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
- *               2023, Confluent Inc.
+ * Copyright (c) 2014-2022 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -121,7 +120,7 @@ namespace RdKafka {
  * @remark This value should only be used during compile time,
  *         for runtime checks of version use RdKafka::version()
  */
-#define RD_KAFKA_VERSION 0x020600ff
+#define RD_KAFKA_VERSION 0x020002ff
 
 /**
  * @brief Returns the librdkafka version as integer.
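Upstream librdkafka documents `RD_KAFKA_VERSION` as hex-encoded `0xMMmmrrxx` (major, minor, revision, pre-release id; `0xff` marks a release), which is why the constant flips from `0x020600ff` to `0x020002ff` here. A small decoding sketch, not part of the diff:

```cpp
// Decodes RD_KAFKA_VERSION-style constants (0xMMmmrrxx, xx = 0xff for
// release builds, per upstream librdkafka documentation).
#include <cstdio>

static void print_version(unsigned v) {
  std::printf("%u.%u.%u\n",
              (v >> 24) & 0xff,   // major
              (v >> 16) & 0xff,   // minor
              (v >> 8) & 0xff);   // revision
}

int main() {
  print_version(0x020002ff);  // the reverted value: prints 2.0.2
  print_version(0x020600ff);  // the 2.6.0 value being backed out
  return 0;
}
```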
@@ -334,8 +333,6 @@ enum ErrorCode {
   ERR__NOOP = -141,
   /** No offset to automatically reset to */
   ERR__AUTO_OFFSET_RESET = -140,
-  /** Partition log truncation detected */
-  ERR__LOG_TRUNCATION = -139,
 
   /** End internal error codes */
   ERR__END = -100,
@@ -2064,18 +2061,6 @@ class RD_EXPORT TopicPartition {
 
   /** @returns error code (if applicable) */
   virtual ErrorCode err() const = 0;
-
-  /** @brief Get partition leader epoch, or -1 if not known or relevant. */
-  virtual int32_t get_leader_epoch() = 0;
-
-  /** @brief Set partition leader epoch. */
-  virtual void set_leader_epoch(int32_t leader_epoch) = 0;
-
-  /** @brief Get partition metadata. */
-  virtual std::vector<unsigned char> get_metadata() = 0;
-
-  /** @brief Set partition metadata. */
-  virtual void set_metadata(std::vector<unsigned char> &metadata) = 0;
 };
 
 
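This hunk deletes the leader-epoch and metadata accessors that the 2.6.0 headers added to `TopicPartition`. Hypothetical caller code like the sketch below (not in the tree) compiles against the 2.6.0 header but not against the reverted 2.0.2 one:

```cpp
// Hypothetical caller, for contrast only: uses the TopicPartition
// leader-epoch accessor that exists in librdkafka 2.6.0 but is removed
// again by this revert to 2.0.2.
#include <librdkafka/rdkafkacpp.h>

#include <memory>
#include <string>

int32_t epoch_of(const std::string &topic, int partition) {
  std::unique_ptr<RdKafka::TopicPartition> tp(
      RdKafka::TopicPartition::create(topic, partition));
  return tp->get_leader_epoch();  // 2.6.0-only API; returns -1 when unknown
}
```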
@@ -2133,11 +2118,6 @@ class RD_EXPORT Topic {
    * The offset will be committed (written) to the broker (or file) according
    * to \p auto.commit.interval.ms or next manual offset-less commit call.
    *
-   * @deprecated This API lacks support for partition leader epochs, which makes
-   *             it at risk for unclean leader election log truncation issues.
-   *             Use KafkaConsumer::offsets_store() or
-   *             Message::offset_store() instead.
-   *
    * @remark \c enable.auto.offset.store must be set to \c false when using
    *         this API.
    *
@@ -2568,31 +2548,6 @@ class RD_EXPORT Message {
   /** @returns the broker id of the broker the message was produced to or
    *           fetched from, or -1 if not known/applicable. */
   virtual int32_t broker_id() const = 0;
-
-  /** @returns the message's partition leader epoch at the time the message was
-   *           fetched and if known, else -1. */
-  virtual int32_t leader_epoch() const = 0;
-
-  /**
-   * @brief Store offset +1 for the consumed message.
-   *
-   * The message offset + 1 will be committed to broker according
-   * to \c `auto.commit.interval.ms` or manual offset-less commit()
-   *
-   * @warning This method may only be called for partitions that are currently
-   *          assigned.
-   *          Non-assigned partitions will fail with ERR__STATE.
-   *
-   * @warning Avoid storing offsets after calling seek() (et.al) as
-   *          this may later interfere with resuming a paused partition, instead
-   *          store offsets prior to calling seek.
-   *
-   * @remark \c `enable.auto.offset.store` must be set to "false" when using
-   *         this API.
-   *
-   * @returns NULL on success or an error object on failure.
-   */
-  virtual Error *offset_store() = 0;
 };
 
 /**@}*/
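With `Message::offset_store()` gone from the 2.0.2 header, manual offset storage falls back to `KafkaConsumer::offsets_store()`, which both versions provide. A hedged sketch (not from this commit), assuming `consumer` is configured with `enable.auto.offset.store=false` and `msg` is a successfully consumed message:

```cpp
// Sketch: manual offset storage on librdkafka 2.0.2, where
// Message::offset_store() does not exist yet.
#include <librdkafka/rdkafkacpp.h>

#include <memory>
#include <vector>

void store_offset(RdKafka::KafkaConsumer *consumer, RdKafka::Message *msg) {
  std::unique_ptr<RdKafka::TopicPartition> tp(
      RdKafka::TopicPartition::create(msg->topic_name(), msg->partition()));
  tp->set_offset(msg->offset() + 1);  // store the offset of the *next* message
  std::vector<RdKafka::TopicPartition *> offsets = {tp.get()};
  consumer->offsets_store(offsets);   // returns ERR_NO_ERROR on success
}
```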
@@ -2993,9 +2948,6 @@ class RD_EXPORT KafkaConsumer : public virtual Handle {
    * @remark \c enable.auto.offset.store must be set to \c false when using
    *         this API.
    *
-   * @remark The leader epoch, if set, will be used to fence outdated partition
-   *         leaders. See TopicPartition::set_leader_epoch().
-   *
    * @returns RdKafka::ERR_NO_ERROR on success, or
    *          RdKafka::ERR___UNKNOWN_PARTITION if none of the offsets could
    *          be stored, or
@@ -1,8 +1,7 @@
 /*
  * librdkafka - Apache Kafka C/C++ library
  *
- * Copyright (c) 2014-2022, Magnus Edenhill
- *               2023, Confluent Inc.
+ * Copyright (c) 2014 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -551,21 +550,6 @@ class MessageImpl : public Message {
     return rd_kafka_message_broker_id(rkmessage_);
   }
 
-  int32_t leader_epoch() const {
-    return rd_kafka_message_leader_epoch(rkmessage_);
-  }
-
-
-  Error *offset_store() {
-    rd_kafka_error_t *c_error;
-
-    c_error = rd_kafka_offset_store_message(rkmessage_);
-
-    if (c_error)
-      return new ErrorImpl(c_error);
-    else
-      return NULL;
-  }
-
 
   RdKafka::Topic *topic_;
   rd_kafka_message_t *rkmessage_;
@@ -633,41 +617,6 @@ class ConfImpl : public Conf {
     return Conf::CONF_OK;
   }
 
-  Conf::ConfResult set(const std::string &name,
-                       ResolveCb *resolve_cb,
-                       std::string &errstr) {
-    if (name != "resolve_cb") {
-      errstr = "Invalid value type, expected RdKafka::ResolveCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    resolve_cb_ = resolve_cb;
-    return Conf::CONF_OK;
-  }
-
-
-  Conf::ConfResult set(const std::string &name,
-                       ConnectCb *connect_cb,
-                       std::string &errstr) {
-    if (name != "connect_cb") {
-      errstr = "Invalid value type, expected RdKafka::ConnectCb";
-      return Conf::CONF_INVALID;
-    }
-
-    if (!rk_conf_) {
-      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
-      return Conf::CONF_INVALID;
-    }
-
-    connect_cb_ = connect_cb;
-    return Conf::CONF_OK;
-  }
-
   Conf::ConfResult set(const std::string &name,
                        OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb,
                        std::string &errstr) {
@@ -775,6 +724,41 @@ class ConfImpl : public Conf {
     return Conf::CONF_OK;
   }
 
+  Conf::ConfResult set(const std::string &name,
+                       ResolveCb *resolve_cb,
+                       std::string &errstr) {
+    if (name != "resolve_cb") {
+      errstr = "Invalid value type, expected RdKafka::ResolveCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    resolve_cb_ = resolve_cb;
+    return Conf::CONF_OK;
+  }
+
+
+  Conf::ConfResult set(const std::string &name,
+                       ConnectCb *connect_cb,
+                       std::string &errstr) {
+    if (name != "connect_cb") {
+      errstr = "Invalid value type, expected RdKafka::ConnectCb";
+      return Conf::CONF_INVALID;
+    }
+
+    if (!rk_conf_) {
+      errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+      return Conf::CONF_INVALID;
+    }
+
+    connect_cb_ = connect_cb;
+    return Conf::CONF_OK;
+  }
+
   Conf::ConfResult set(const std::string &name,
                        OpenCb *open_cb,
                        std::string &errstr) {
@@ -942,20 +926,6 @@ class ConfImpl : public Conf {
     return Conf::CONF_OK;
   }
 
-  Conf::ConfResult get(ResolveCb *&resolve_cb) const {
-    if (!rk_conf_)
-      return Conf::CONF_INVALID;
-    resolve_cb = this->resolve_cb_;
-    return Conf::CONF_OK;
-  }
-
-  Conf::ConfResult get(ConnectCb *&connect_cb) const {
-    if (!rk_conf_)
-      return Conf::CONF_INVALID;
-    connect_cb = this->connect_cb_;
-    return Conf::CONF_OK;
-  }
-
   Conf::ConfResult get(
       OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const {
     if (!rk_conf_)
@@ -992,6 +962,20 @@ class ConfImpl : public Conf {
     return Conf::CONF_OK;
   }
 
+  Conf::ConfResult get(ResolveCb *&resolve_cb) const {
+    if (!rk_conf_)
+      return Conf::CONF_INVALID;
+    resolve_cb = this->resolve_cb_;
+    return Conf::CONF_OK;
+  }
+
+  Conf::ConfResult get(ConnectCb *&connect_cb) const {
+    if (!rk_conf_)
+      return Conf::CONF_INVALID;
+    connect_cb = this->connect_cb_;
+    return Conf::CONF_OK;
+  }
+
   Conf::ConfResult get(OpenCb *&open_cb) const {
     if (!rk_conf_)
       return Conf::CONF_INVALID;
@@ -1310,28 +1294,22 @@ class TopicPartitionImpl : public TopicPartition {
       topic_(topic),
       partition_(partition),
       offset_(RdKafka::Topic::OFFSET_INVALID),
-      err_(ERR_NO_ERROR),
-      leader_epoch_(-1) {
+      err_(ERR_NO_ERROR) {
   }
 
   TopicPartitionImpl(const std::string &topic, int partition, int64_t offset) :
       topic_(topic),
       partition_(partition),
       offset_(offset),
-      err_(ERR_NO_ERROR),
-      leader_epoch_(-1) {
+      err_(ERR_NO_ERROR) {
   }
 
   TopicPartitionImpl(const rd_kafka_topic_partition_t *c_part) {
-    topic_        = std::string(c_part->topic);
-    partition_    = c_part->partition;
-    offset_       = c_part->offset;
-    err_          = static_cast<ErrorCode>(c_part->err);
-    leader_epoch_ = rd_kafka_topic_partition_get_leader_epoch(c_part);
-    if (c_part->metadata_size > 0) {
-      unsigned char *metadata = (unsigned char *)c_part->metadata;
-      metadata_.assign(metadata, metadata + c_part->metadata_size);
-    }
+    topic_     = std::string(c_part->topic);
+    partition_ = c_part->partition;
+    offset_    = c_part->offset;
+    err_       = static_cast<ErrorCode>(c_part->err);
+    // FIXME: metadata
   }
 
   static void destroy(std::vector<TopicPartition *> &partitions);
@@ -1355,22 +1333,6 @@ class TopicPartitionImpl : public TopicPartition {
     offset_ = offset;
   }
 
-  int32_t get_leader_epoch() {
-    return leader_epoch_;
-  }
-
-  void set_leader_epoch(int32_t leader_epoch) {
-    leader_epoch_ = leader_epoch;
-  }
-
-  std::vector<unsigned char> get_metadata() {
-    return metadata_;
-  }
-
-  void set_metadata(std::vector<unsigned char> &metadata) {
-    metadata_ = metadata;
-  }
-
   std::ostream &operator<<(std::ostream &ostrm) const {
     return ostrm << topic_ << " [" << partition_ << "]";
   }
@@ -1379,8 +1341,6 @@ class TopicPartitionImpl : public TopicPartition {
   int partition_;
   int64_t offset_;
   ErrorCode err_;
-  int32_t leader_epoch_;
-  std::vector<unsigned char> metadata_;
 };
 
 
@@ -1,30 +0,0 @@
-# Instructions for Updating KLZ4 Version
-
-This document describes the steps to update the bundled lz4 version, that is,
-the version used when `./configure` is run with `--disable-lz4-ext`.
-
-1. For each file in the [lz4 repository's](https://github.com/lz4/lz4/) `lib`
-   directory (checked out to the appropriate version tag), copy it into the
-   librdkafka `src` directory, overwriting the previous files.
-2. Copy `xxhash.h` and `xxhash.c` files, and rename them to `rdxxhash.h` and
-   `rdxxhash.c`, respectively, replacing the previous files. Change any
-   `#include`s of `xxhash.h` to `rdxxhash.h`.
-3. Replace the `#else` block of the
-   `#if defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)`
-   with the following code, including the comment:
-   ```c
-   #else
-   /* NOTE: While upgrading the lz4 version, replace the original `#else` block
-    * in the code with this block, and retain this comment. */
-   struct rdkafka_s;
-   extern void *rd_kafka_mem_malloc(struct rdkafka_s *rk, size_t s);
-   extern void *rd_kafka_mem_calloc(struct rdkafka_s *rk, size_t n, size_t s);
-   extern void rd_kafka_mem_free(struct rdkafka_s *rk, void *p);
-   # define ALLOC(s) rd_kafka_mem_malloc(NULL, s)
-   # define ALLOC_AND_ZERO(s) rd_kafka_mem_calloc(NULL, 1, s)
-   # define FREEMEM(p) rd_kafka_mem_free(NULL, p)
-   #endif
-   ```
-4. Change version mentioned for lz4 in `configure.self`.
-4. Run `./configure` with `--disable-lz4-ext` option, make and run test 0017.
-5. Update CHANGELOG.md and both the lz4 LICENSE, and the combined LICENSE.
File diff suppressed because it is too large
@@ -0,0 +1,398 @@
/*
  Copyright (c) 2009-2017 Dave Gamble and cJSON contributors

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
  in the Software without restriction, including without limitation the rights
  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in
  all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  THE SOFTWARE.
*/

#ifndef cJSON__h
#define cJSON__h

#ifdef __cplusplus
extern "C" {
#endif

#if !defined(__WINDOWS__) && \
    (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
#define __WINDOWS__
#endif

#ifdef __WINDOWS__

/* When compiling for windows, we specify a specific calling convention to avoid
issues where we are being called from a project with a different default calling
convention. For windows you have 3 define options:

CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever
dllexport symbols CJSON_EXPORT_SYMBOLS - Define this on library build when you
want to dllexport symbols (default) CJSON_IMPORT_SYMBOLS - Define this if you
want to dllimport symbol

For *nix builds that support visibility attribute, you can define similar
behavior by

setting default visibility to hidden by adding
-fvisibility=hidden (for gcc)
or
-xldscope=hidden (for sun cc)
to CFLAGS

then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way
CJSON_EXPORT_SYMBOLS does

*/

#define CJSON_CDECL __cdecl
#define CJSON_STDCALL __stdcall

/* export symbols by default, this is necessary for copy pasting the C and
 * header file */
#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && \
    !defined(CJSON_EXPORT_SYMBOLS)
#define CJSON_EXPORT_SYMBOLS
#endif

#if defined(CJSON_HIDE_SYMBOLS)
#define CJSON_PUBLIC(type) type CJSON_STDCALL
#elif defined(CJSON_EXPORT_SYMBOLS)
#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
#elif defined(CJSON_IMPORT_SYMBOLS)
#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
#endif
#else /* !__WINDOWS__ */
#define CJSON_CDECL
#define CJSON_STDCALL

#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined(__SUNPRO_C)) && \
    defined(CJSON_API_VISIBILITY)
#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
#else
#define CJSON_PUBLIC(type) type
#endif
#endif

/* project version */
#define CJSON_VERSION_MAJOR 1
#define CJSON_VERSION_MINOR 7
#define CJSON_VERSION_PATCH 14

#include <stddef.h>

/* cJSON Types: */
#define cJSON_Invalid (0)
#define cJSON_False (1 << 0)
#define cJSON_True (1 << 1)
#define cJSON_NULL (1 << 2)
#define cJSON_Number (1 << 3)
#define cJSON_String (1 << 4)
#define cJSON_Array (1 << 5)
#define cJSON_Object (1 << 6)
#define cJSON_Raw (1 << 7) /* raw json */

#define cJSON_IsReference 256
#define cJSON_StringIsConst 512

/* The cJSON structure: */
typedef struct cJSON {
    /* next/prev allow you to walk array/object chains. Alternatively, use
     * GetArraySize/GetArrayItem/GetObjectItem */
    struct cJSON *next;
    struct cJSON *prev;
    /* An array or object item will have a child pointer pointing to a chain
     * of the items in the array/object. */
    struct cJSON *child;

    /* The type of the item, as above. */
    int type;

    /* The item's string, if type==cJSON_String and type == cJSON_Raw */
    char *valuestring;
    /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead
     */
    int valueint;
    /* The item's number, if type==cJSON_Number */
    double valuedouble;

    /* The item's name string, if this item is the child of, or is in the
     * list of subitems of an object. */
    char *string;
} cJSON;

typedef struct cJSON_Hooks {
    /* malloc/free are CDECL on Windows regardless of the default calling
     * convention of the compiler, so ensure the hooks allow passing those
     * functions directly. */
    void *(CJSON_CDECL *malloc_fn)(size_t sz);
    void(CJSON_CDECL *free_fn)(void *ptr);
} cJSON_Hooks;

typedef int cJSON_bool;

/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse
 * them. This is to prevent stack overflows. */
#ifndef CJSON_NESTING_LIMIT
#define CJSON_NESTING_LIMIT 1000
#endif

/* returns the version of cJSON as a string */
CJSON_PUBLIC(const char *) cJSON_Version(void);

/* Supply malloc, realloc and free functions to cJSON */
CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks);

/* Memory Management: the caller is always responsible to free the results from
 * all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib
 * free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is
 * cJSON_PrintPreallocated, where the caller has full responsibility of the
 * buffer. */
/* Supply a block of JSON, and this returns a cJSON object you can interrogate.
 */
CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value);
CJSON_PUBLIC(cJSON *)
cJSON_ParseWithLength(const char *value, size_t buffer_length);
/* ParseWithOpts allows you to require (and check) that the JSON is null
 * terminated, and to retrieve the pointer to the final byte parsed. */
/* If you supply a ptr in return_parse_end and parsing fails, then
 * return_parse_end will contain a pointer to the error so will match
 * cJSON_GetErrorPtr(). */
CJSON_PUBLIC(cJSON *)
cJSON_ParseWithOpts(const char *value,
                    const char **return_parse_end,
                    cJSON_bool require_null_terminated);
CJSON_PUBLIC(cJSON *)
cJSON_ParseWithLengthOpts(const char *value,
                          size_t buffer_length,
                          const char **return_parse_end,
                          cJSON_bool require_null_terminated);

/* Render a cJSON entity to text for transfer/storage. */
CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item);
/* Render a cJSON entity to text for transfer/storage without any formatting. */
CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item);
/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess
 * at the final size. guessing well reduces reallocation. fmt=0 gives
 * unformatted, =1 gives formatted */
CJSON_PUBLIC(char *)
cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
/* Render a cJSON entity to text using a buffer already allocated in memory with
 * given length. Returns 1 on success and 0 on failure. */
/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will
 * use, so to be safe allocate 5 bytes more than you actually need */
CJSON_PUBLIC(cJSON_bool)
cJSON_PrintPreallocated(cJSON *item,
                        char *buffer,
                        const int length,
                        const cJSON_bool format);
/* Delete a cJSON entity and all subentities. */
CJSON_PUBLIC(void) cJSON_Delete(cJSON *item);

/* Returns the number of items in an array (or object). */
CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array);
/* Retrieve item number "index" from array "array". Returns NULL if
 * unsuccessful. */
CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
/* Get item "string" from object. Case insensitive. */
CJSON_PUBLIC(cJSON *)
cJSON_GetObjectItem(const cJSON *const object, const char *const string);
CJSON_PUBLIC(cJSON *)
cJSON_GetObjectItemCaseSensitive(const cJSON *const object,
                                 const char *const string);
CJSON_PUBLIC(cJSON_bool)
cJSON_HasObjectItem(const cJSON *object, const char *string);
/* For analysing failed parses. This returns a pointer to the parse error.
 * You'll probably need to look a few chars back to make sense of it. Defined
 * when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);

/* Check item type and return its value */
CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item);
CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item);

/* These functions check the type of an item */
CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item);
CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item);

/* These calls create a cJSON item of the appropriate type. */
CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void);
CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void);
CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void);
CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean);
CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num);
CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string);
/* raw json */
CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw);
CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void);
CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void);

/* Create a string where valuestring references a string so
 * it will not be freed by cJSON_Delete */
CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string);
/* Create an object/array that only references it's elements so
 * they will not be freed by cJSON_Delete */
CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child);
CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child);

/* These utilities create an Array of count items.
 * The parameter count cannot be greater than the number of elements in the
 * number array, otherwise array access will be out of bounds.*/
CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count);
CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count);
CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count);
CJSON_PUBLIC(cJSON *)
cJSON_CreateStringArray(const char *const *strings, int count);

/* Append item to the specified array/object. */
CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item);
CJSON_PUBLIC(cJSON_bool)
cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
/* Use this when string is definitely const (i.e. a literal, or as good as), and
 * will definitely survive the cJSON object. WARNING: When this function was
 * used, make sure to always check that (item->type & cJSON_StringIsConst) is
 * zero before writing to `item->string` */
CJSON_PUBLIC(cJSON_bool)
cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
/* Append reference to item to the specified array/object. Use this when you
 * want to add an existing cJSON to a new cJSON, but don't want to corrupt your
 * existing cJSON. */
CJSON_PUBLIC(cJSON_bool)
cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
CJSON_PUBLIC(cJSON_bool)
cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);

/* Remove/Detach items from Arrays/Objects. */
CJSON_PUBLIC(cJSON *)
cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item);
CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
CJSON_PUBLIC(cJSON *)
cJSON_DetachItemFromObject(cJSON *object, const char *string);
CJSON_PUBLIC(cJSON *)
cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
CJSON_PUBLIC(void)
cJSON_DeleteItemFromObject(cJSON *object, const char *string);
CJSON_PUBLIC(void)
cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);

/* Update array items. */
CJSON_PUBLIC(cJSON_bool)
cJSON_InsertItemInArray(
    cJSON *array,
    int which,
    cJSON *newitem); /* Shifts pre-existing items to the right. */
CJSON_PUBLIC(cJSON_bool)
cJSON_ReplaceItemViaPointer(cJSON *const parent,
                            cJSON *const item,
                            cJSON *replacement);
CJSON_PUBLIC(cJSON_bool)
cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
CJSON_PUBLIC(cJSON_bool)
cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem);
CJSON_PUBLIC(cJSON_bool)
cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,
                                       const char *string,
                                       cJSON *newitem);

/* Duplicate a cJSON item */
CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
/* Duplicate will create a new, identical cJSON item to the one you pass, in new
 * memory that will need to be released. With recurse!=0, it will duplicate any
 * children connected to the item.
 * The item->next and ->prev pointers are always zero on return from Duplicate.
 */
/* Recursively compare two cJSON items for equality. If either a or b is NULL or
 * invalid, they will be considered unequal.
 * case_sensitive determines if object keys are treated case sensitive (1) or
 * case insensitive (0) */
CJSON_PUBLIC(cJSON_bool)
cJSON_Compare(const cJSON *const a,
              const cJSON *const b,
              const cJSON_bool case_sensitive);

/* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from
 * strings. The input pointer json cannot point to a read-only address area,
 * such as a string constant,
 * but should point to a readable and writable adress area. */
CJSON_PUBLIC(void) cJSON_Minify(char *json);

/* Helper functions for creating and adding items to an object at the same time.
 * They return the added item or NULL on failure. */
CJSON_PUBLIC(cJSON *)
cJSON_AddNullToObject(cJSON *const object, const char *const name);
CJSON_PUBLIC(cJSON *)
cJSON_AddTrueToObject(cJSON *const object, const char *const name);
CJSON_PUBLIC(cJSON *)
cJSON_AddFalseToObject(cJSON *const object, const char *const name);
CJSON_PUBLIC(cJSON *)
cJSON_AddBoolToObject(cJSON *const object,
                      const char *const name,
                      const cJSON_bool boolean);
CJSON_PUBLIC(cJSON *)
cJSON_AddNumberToObject(cJSON *const object,
                        const char *const name,
                        const double number);
CJSON_PUBLIC(cJSON *)
cJSON_AddStringToObject(cJSON *const object,
                        const char *const name,
                        const char *const string);
CJSON_PUBLIC(cJSON *)
cJSON_AddRawToObject(cJSON *const object,
                     const char *const name,
                     const char *const raw);
CJSON_PUBLIC(cJSON *)
cJSON_AddObjectToObject(cJSON *const object, const char *const name);
CJSON_PUBLIC(cJSON *)
cJSON_AddArrayToObject(cJSON *const object, const char *const name);

/* When assigning an integer value, it needs to be propagated to valuedouble
 * too. */
#define cJSON_SetIntValue(object, number) \
    ((object) ? (object)->valueint = (object)->valuedouble = (number) \
              : (number))
/* helper for the cJSON_SetNumberValue macro */
CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number);
#define cJSON_SetNumberValue(object, number) \
    ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) \
                      : (number))
/* Change the valuestring of a cJSON_String object, only takes effect when type
 * of object is cJSON_String */
CJSON_PUBLIC(char *)
cJSON_SetValuestring(cJSON *object, const char *valuestring);

/* Macro for iterating over an array or object */
#define cJSON_ArrayForEach(element, array) \
    for (element = (array != NULL) ? (array)->child : NULL; \
         element != NULL; element = element->next)

/* malloc/free objects using the malloc/free functions that have been set with
 * cJSON_InitHooks */
CJSON_PUBLIC(void *) cJSON_malloc(size_t size);
CJSON_PUBLIC(void) cJSON_free(void *object);

#ifdef __cplusplus
}
#endif

#endif
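Since the cJSON header above is vendored verbatim, a brief usage sketch may help orient readers. It uses only functions declared above; the JSON payload is made up for illustration, and the sketch is not part of this commit.

```c
/* Minimal usage sketch for the vendored cJSON API declared above;
 * illustrative only, not part of this commit. */
#include <stdio.h>
#include "cJSON.h"

int main(void) {
    cJSON *root = cJSON_Parse("{\"topic\": \"orders\", \"partition\": 3}");
    if (root == NULL) {
        /* cJSON_GetErrorPtr() points at (roughly) where parsing failed */
        fprintf(stderr, "parse error near: %s\n", cJSON_GetErrorPtr());
        return 1;
    }
    const cJSON *partition = cJSON_GetObjectItem(root, "partition");
    if (cJSON_IsNumber(partition)) {
        printf("partition = %d\n", partition->valueint);
    }
    char *printed = cJSON_PrintUnformatted(root); /* caller must free */
    if (printed != NULL) {
        printf("%s\n", printed);
        cJSON_free(printed);
    }
    cJSON_Delete(root); /* frees root and all children */
    return 0;
}
```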
@@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2017-2022, Magnus Edenhill
 * Copyright (c) 2017 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

File diff suppressed because it is too large
@@ -1,7 +1,7 @@
/*
 *  KLZ4 - Fast LZ compression algorithm
 *  Header File
 *  Copyright (C) 2011-2020, Yann Collet.
 *  Copyright (C) 2011-present, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

@@ -97,77 +97,36 @@ extern "C" {
#  define KLZ4LIB_API KLZ4LIB_VISIBILITY
#endif

/*! KLZ4_FREESTANDING :
 *  When this macro is set to 1, it enables "freestanding mode" that is
 *  suitable for typical freestanding environment which doesn't support
 *  standard C library.
 *
 *  - KLZ4_FREESTANDING is a compile-time switch.
 *  - It requires the following macros to be defined:
 *    KLZ4_memcpy, KLZ4_memmove, KLZ4_memset.
 *  - It only enables KLZ4/HC functions which don't use heap.
 *    All KLZ4F_* functions are not supported.
 *  - See tests/freestanding.c to check its basic setup.
 */
#if defined(KLZ4_FREESTANDING) && (KLZ4_FREESTANDING == 1)
#  define KLZ4_HEAPMODE 0
#  define KLZ4HC_HEAPMODE 0
#  define KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1
#  if !defined(KLZ4_memcpy)
#    error "KLZ4_FREESTANDING requires macro 'KLZ4_memcpy'."
#  endif
#  if !defined(KLZ4_memset)
#    error "KLZ4_FREESTANDING requires macro 'KLZ4_memset'."
#  endif
#  if !defined(KLZ4_memmove)
#    error "KLZ4_FREESTANDING requires macro 'KLZ4_memmove'."
#  endif
#elif ! defined(KLZ4_FREESTANDING)
#  define KLZ4_FREESTANDING 0
#endif


/*------   Version   ------*/
#define KLZ4_VERSION_MAJOR   1    /* for breaking interface changes  */
#define KLZ4_VERSION_MINOR   9    /* for new (non-breaking) interface capabilities */
#define KLZ4_VERSION_RELEASE 4    /* for tweaks, bug-fixes, or development */
#define KLZ4_VERSION_RELEASE 3    /* for tweaks, bug-fixes, or development */

#define KLZ4_VERSION_NUMBER (KLZ4_VERSION_MAJOR *100*100 + KLZ4_VERSION_MINOR *100 + KLZ4_VERSION_RELEASE)

#define KLZ4_LIB_VERSION KLZ4_VERSION_MAJOR.KLZ4_VERSION_MINOR.KLZ4_VERSION_RELEASE
#define KLZ4_QUOTE(str) #str
#define KLZ4_EXPAND_AND_QUOTE(str) KLZ4_QUOTE(str)
#define KLZ4_VERSION_STRING KLZ4_EXPAND_AND_QUOTE(KLZ4_LIB_VERSION)   /* requires v1.7.3+ */
#define KLZ4_VERSION_STRING KLZ4_EXPAND_AND_QUOTE(KLZ4_LIB_VERSION)

KLZ4LIB_API int KLZ4_versionNumber (void);  /**< library version number; useful to check dll version; requires v1.3.0+ */
KLZ4LIB_API const char* KLZ4_versionString (void);   /**< library version string; useful to check dll version; requires v1.7.5+ */
KLZ4LIB_API int KLZ4_versionNumber (void);  /**< library version number; useful to check dll version */
KLZ4LIB_API const char* KLZ4_versionString (void);   /**< library version string; useful to check dll version */


/*-************************************
 *  Tuning parameter
 **************************************/
#define KLZ4_MEMORY_USAGE_MIN 10
#define KLZ4_MEMORY_USAGE_DEFAULT 14
#define KLZ4_MEMORY_USAGE_MAX 20

/*!
 * KLZ4_MEMORY_USAGE :
 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; )
 * Increasing memory usage improves compression ratio, at the cost of speed.
 * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality.
 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
 * Increasing memory usage improves compression ratio.
 * Reduced memory usage may improve speed, thanks to better cache locality.
 * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
 */
#ifndef KLZ4_MEMORY_USAGE
#  define KLZ4_MEMORY_USAGE KLZ4_MEMORY_USAGE_DEFAULT
#  define KLZ4_MEMORY_USAGE 14
#endif

#if (KLZ4_MEMORY_USAGE < KLZ4_MEMORY_USAGE_MIN)
#  error "KLZ4_MEMORY_USAGE is too small !"
#endif

#if (KLZ4_MEMORY_USAGE > KLZ4_MEMORY_USAGE_MAX)
#  error "KLZ4_MEMORY_USAGE is too large !"
#endif

/*-************************************
 *  Simple Functions
@@ -311,25 +270,8 @@ KLZ4LIB_API int KLZ4_decompress_safe_partial (const char* src, char* dst, int sr
 ***********************************************/
typedef union KLZ4_stream_u KLZ4_stream_t;  /* incomplete type (defined later) */

/**
 Note about RC_INVOKED

 - RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio).
   https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros

 - Since rc.exe is a legacy compiler, it truncates long symbol (> 30 chars)
   and reports warning "RC4011: identifier truncated".

 - To eliminate the warning, we surround long preprocessor symbol with
   "#if !defined(RC_INVOKED) ... #endif" block that means
   "skip this block when rc.exe is trying to read it".
*/
#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
KLZ4LIB_API KLZ4_stream_t* KLZ4_createStream(void);
KLZ4LIB_API int           KLZ4_freeStream (KLZ4_stream_t* streamPtr);
#endif /* !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
#endif

/*! KLZ4_resetStream_fast() : v1.9.0+
 *  Use this to prepare an KLZ4_stream_t for a new chain of dependent blocks

@@ -413,12 +355,8 @@ typedef union KLZ4_streamDecode_u KLZ4_streamDecode_t;   /* tracking context */
 *  creation / destruction of streaming decompression tracking context.
 *  A tracking context can be re-used multiple times.
 */
#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */
#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
KLZ4LIB_API KLZ4_streamDecode_t* KLZ4_createStreamDecode(void);
KLZ4LIB_API int                  KLZ4_freeStreamDecode (KLZ4_streamDecode_t* KLZ4_stream);
#endif /* !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
#endif

/*! KLZ4_setStreamDecode() :
 *  An KLZ4_streamDecode_t context can be allocated once and re-used multiple times.

@@ -468,10 +406,7 @@ KLZ4LIB_API int KLZ4_decoderRingBufferSize(int maxBlockSize);
 *  save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
 *  then indicate where this data is saved using KLZ4_setStreamDecode(), before decompressing next block.
 */
KLZ4LIB_API int
KLZ4_decompress_safe_continue (KLZ4_streamDecode_t* KLZ4_streamDecode,
                               const char* src, char* dst,
                               int srcSize, int dstCapacity);
KLZ4LIB_API int KLZ4_decompress_safe_continue (KLZ4_streamDecode_t* KLZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity);


/*! KLZ4_decompress_*_usingDict() :

@@ -482,16 +417,7 @@ KLZ4_decompress_safe_continue (KLZ4_streamDecode_t* KLZ4_streamDecode,
 *  Performance tip : Decompression speed can be substantially increased
 *  when dst == dictStart + dictSize.
 */
KLZ4LIB_API int
KLZ4_decompress_safe_usingDict(const char* src, char* dst,
                               int srcSize, int dstCapacity,
                               const char* dictStart, int dictSize);

KLZ4LIB_API int
KLZ4_decompress_safe_partial_usingDict(const char* src, char* dst,
                                       int compressedSize,
                                       int targetOutputSize, int maxOutputSize,
                                       const char* dictStart, int dictSize);
KLZ4LIB_API int KLZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize);

#endif /* KLZ4_H_2983827168210 */
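For orientation, here is a hedged sketch of the streaming-decode API declared above (KLZ4_createStreamDecode / KLZ4_decompress_safe_continue). The block framing (`cmpBytes[]` sizes) is an assumption about the caller's transport, not something lz4.h defines, and the sketch is not part of this commit.

```c
/* Hedged sketch: decode a sequence of dependent blocks with the
 * streaming-decode API declared above; illustrative only. */
#include "lz4.h"

int decode_blocks(const char *const cmp[], const int cmpBytes[], int nbBlocks,
                  char *dst, int dstCapacity) {
    int total = 0;
    KLZ4_streamDecode_t *sd = KLZ4_createStreamDecode();
    if (sd == NULL) return -1;
    for (int i = 0; i < nbBlocks; i++) {
        /* Each call may reference up to 64KB of previously decoded data,
         * so dst must keep earlier output in place (or use a ring buffer
         * sized via KLZ4_decoderRingBufferSize()). */
        int const n = KLZ4_decompress_safe_continue(
            sd, cmp[i], dst + total, cmpBytes[i], dstCapacity - total);
        if (n < 0) { total = -1; break; }
        total += n;
    }
    KLZ4_freeStreamDecode(sd);
    return total;
}
```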
@@ -570,15 +496,13 @@ KLZ4LIB_STATIC_API int KLZ4_compress_fast_extState_fastReset (void* state, const
 *  stream (and source buffer) must remain in-place / accessible / unchanged
 *  through the completion of the first compression call on the stream.
 */
KLZ4LIB_STATIC_API void
KLZ4_attach_dictionary(KLZ4_stream_t* workingStream,
                       const KLZ4_stream_t* dictionaryStream);
KLZ4LIB_STATIC_API void KLZ4_attach_dictionary(KLZ4_stream_t* workingStream, const KLZ4_stream_t* dictionaryStream);


/*! In-place compression and decompression
 *
 * It's possible to have input and output sharing the same buffer,
 * for highly constrained memory environments.
 * for highly contrained memory environments.
 * In both cases, it requires input to lay at the end of the buffer,
 * and decompression to start at beginning of the buffer.
 * Buffer size must feature some margin, hence be larger than final size.

@@ -668,26 +592,38 @@ KLZ4_attach_dictionary(KLZ4_stream_t* workingStream,
typedef unsigned int KLZ4_u32;
#endif

/*! KLZ4_stream_t :
 *  Never ever use below internal definitions directly !
 *  These definitions are not API/ABI safe, and may change in future versions.
 *  If you need static allocation, declare or allocate an KLZ4_stream_t object.
 **/

typedef struct KLZ4_stream_t_internal KLZ4_stream_t_internal;
struct KLZ4_stream_t_internal {
    KLZ4_u32 hashTable[KLZ4_HASH_SIZE_U32];
    const KLZ4_byte* dictionary;
    const KLZ4_stream_t_internal* dictCtx;
    KLZ4_u32 currentOffset;
    KLZ4_u32 tableType;
    const KLZ4_byte* dictionary;
    const KLZ4_stream_t_internal* dictCtx;
    KLZ4_u32 dictSize;
    /* Implicit padding to ensure structure is aligned */
};

#define KLZ4_STREAM_MINSIZE  ((1UL << KLZ4_MEMORY_USAGE) + 32)  /* static size, for inter-version compatibility */
typedef struct {
    const KLZ4_byte* externalDict;
    size_t extDictSize;
    const KLZ4_byte* prefixEnd;
    size_t prefixSize;
} KLZ4_streamDecode_t_internal;


/*! KLZ4_stream_t :
 *  Do not use below internal definitions directly !
 *  Declare or allocate an KLZ4_stream_t instead.
 *  KLZ4_stream_t can also be created using KLZ4_createStream(), which is recommended.
 *  The structure definition can be convenient for static allocation
 *  (on stack, or as part of larger structure).
 *  Init this structure with KLZ4_initStream() before first use.
 *  note : only use this definition in association with static linking !
 *    this definition is not API/ABI safe, and may change in future versions.
 */
#define KLZ4_STREAMSIZE 16416  /* static size, for inter-version compatibility */
#define KLZ4_STREAMSIZE_VOIDP (KLZ4_STREAMSIZE / sizeof(void*))
union KLZ4_stream_u {
    char minStateSize[KLZ4_STREAM_MINSIZE];
    void* table[KLZ4_STREAMSIZE_VOIDP];
    KLZ4_stream_t_internal internal_donotuse;
};  /* previously typedef'd to KLZ4_stream_t */

@@ -705,25 +641,21 @@ union KLZ4_stream_u {
 *  In which case, the function will @return NULL.
 *  Note2: An KLZ4_stream_t structure guarantees correct alignment and size.
 *  Note3: Before v1.9.0, use KLZ4_resetStream() instead
 **/
 */
KLZ4LIB_API KLZ4_stream_t* KLZ4_initStream (void* buffer, size_t size);


/*! KLZ4_streamDecode_t :
 *  Never ever use below internal definitions directly !
 *  These definitions are not API/ABI safe, and may change in future versions.
 *  If you need static allocation, declare or allocate an KLZ4_streamDecode_t object.
 **/
typedef struct {
    const KLZ4_byte* externalDict;
    const KLZ4_byte* prefixEnd;
    size_t extDictSize;
    size_t prefixSize;
} KLZ4_streamDecode_t_internal;

#define KLZ4_STREAMDECODE_MINSIZE 32
 * information structure to track an KLZ4 stream during decompression.
 * init this structure using KLZ4_setStreamDecode() before first use.
 * note : only use in association with static linking !
 *        this definition is not API/ABI safe,
 *        and may change in a future version !
 */
#define KLZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ )
#define KLZ4_STREAMDECODESIZE (KLZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
union KLZ4_streamDecode_u {
    char minStateSize[KLZ4_STREAMDECODE_MINSIZE];
    unsigned long long table[KLZ4_STREAMDECODESIZE_U64];
    KLZ4_streamDecode_t_internal internal_donotuse;
} ;   /* previously typedef'd to KLZ4_streamDecode_t */

File diff suppressed because it is too large
@@ -1,7 +1,7 @@
/*
   KLZ4F - KLZ4-Frame library
   KLZ4 auto-framing library
   Header File
   Copyright (C) 2011-2020, Yann Collet.
   Copyright (C) 2011-2017, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without

@@ -39,7 +39,7 @@
 * KLZ4F also offers streaming capabilities.
 *
 * lz4.h is not required when using lz4frame.h,
 * except to extract common constants such as KLZ4_VERSION_NUMBER.
 * except to extract common constant such as KLZ4_VERSION_NUMBER.
 * */

#ifndef KLZ4F_H_09782039843

@@ -54,12 +54,12 @@ extern "C" {


/**
 * Introduction
 *
 * lz4frame.h implements KLZ4 frame specification: see doc/lz4_Frame_format.md .
 * KLZ4 Frames are compatible with `lz4` CLI,
 * and designed to be interoperable with any system.
**/
  Introduction

  lz4frame.h implements KLZ4 frame specification (doc/lz4_Frame_format.md).
  lz4frame.h provides frame compression functions that take care
  of encoding standard metadata alongside KLZ4-compressed blocks.
*/

/*-***************************************************************
 *  Compiler specifics

@@ -210,7 +210,7 @@ KLZ4FLIB_API int KLZ4F_compressionLevel_max(void);   /* v1.8.0+ */
 * Returns the maximum possible compressed size with KLZ4F_compressFrame() given srcSize and preferences.
 * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences.
 * Note : this result is only usable with KLZ4F_compressFrame().
 *        It may also be relevant to KLZ4F_compressUpdate() _only if_ no flush() operation is ever performed.
 *        It may also be used with KLZ4F_compressUpdate() _if no flush() operation_ is performed.
 */
KLZ4FLIB_API size_t KLZ4F_compressFrameBound(size_t srcSize, const KLZ4F_preferences_t* preferencesPtr);

@@ -230,7 +230,7 @@ KLZ4FLIB_API size_t KLZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
 *  Advanced compression functions
 *************************************/
typedef struct KLZ4F_cctx_s KLZ4F_cctx;   /* incomplete type */
typedef KLZ4F_cctx* KLZ4F_compressionContext_t;  /* for compatibility with older APIs, prefer using KLZ4F_cctx */
typedef KLZ4F_cctx* KLZ4F_compressionContext_t;  /* for compatibility with previous API version */

typedef struct {
  unsigned stableSrc;    /* 1 == src content will remain present on future calls to KLZ4F_compress(); skip copying src content within tmp buffer */

@@ -243,27 +243,20 @@ typedef struct {
KLZ4FLIB_API unsigned KLZ4F_getVersion(void);

/*! KLZ4F_createCompressionContext() :
 *  The first thing to do is to create a compressionContext object,
 *  which will keep track of operation state during streaming compression.
 *  This is achieved using KLZ4F_createCompressionContext(), which takes as argument a version,
 *  and a pointer to KLZ4F_cctx*, to write the resulting pointer into.
 *  @version provided MUST be KLZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL.
 *  The function provides a pointer to a fully allocated KLZ4F_cctx object.
 *  @cctxPtr MUST be != NULL.
 *  If @return != zero, context creation failed.
 *  A created compression context can be employed multiple times for consecutive streaming operations.
 *  Once all streaming compression jobs are completed,
 *  the state object can be released using KLZ4F_freeCompressionContext().
 *  Note1 : KLZ4F_freeCompressionContext() is always successful. Its return value can be ignored.
 *  Note2 : KLZ4F_freeCompressionContext() works fine with NULL input pointers (do nothing).
 **/
 * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
 * This is achieved using KLZ4F_createCompressionContext(), which takes as argument a version.
 * The version provided MUST be KLZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL.
 * The function will provide a pointer to a fully allocated KLZ4F_cctx object.
 * If @return != zero, there was an error during context creation.
 * Object can release its memory using KLZ4F_freeCompressionContext();
 */
KLZ4FLIB_API KLZ4F_errorCode_t KLZ4F_createCompressionContext(KLZ4F_cctx** cctxPtr, unsigned version);
KLZ4FLIB_API KLZ4F_errorCode_t KLZ4F_freeCompressionContext(KLZ4F_cctx* cctx);


/*----    Compression    ----*/

#define KLZ4F_HEADER_SIZE_MIN  7   /* KLZ4 Frame header size can vary, depending on selected parameters */
#define KLZ4F_HEADER_SIZE_MIN  7   /* KLZ4 Frame header size can vary, depending on selected paramaters */
#define KLZ4F_HEADER_SIZE_MAX 19

/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */
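A hedged sketch of one-shot frame compression with the KLZ4F API above may be useful for orientation. The `"lz4frame.h"` include path and the helper's name are assumptions; the sketch is not part of this commit.

```c
/* Hedged sketch of one-shot frame compression with the KLZ4F API above;
 * illustrative only, not part of this commit. */
#include <stdlib.h>
#include "lz4frame.h"

/* Compress src into a newly allocated buffer; returns the compressed size
 * or 0 on error. On success the caller frees *dstOut. */
size_t compress_frame(const void *src, size_t srcSize, void **dstOut) {
    /* NULL preferences == defaults; this bound is only valid for
     * KLZ4F_compressFrame(), per the comment above. */
    size_t const bound = KLZ4F_compressFrameBound(srcSize, NULL);
    void *dst = malloc(bound);
    if (dst == NULL) return 0;
    size_t const n = KLZ4F_compressFrame(dst, bound, src, srcSize, NULL);
    if (KLZ4F_isError(n)) { free(dst); return 0; }
    *dstOut = dst;
    return n;
}
```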
@@ -308,9 +301,8 @@ KLZ4FLIB_API size_t KLZ4F_compressBound(size_t srcSize, const KLZ4F_preferences_
 *  Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations.
 *  This value is provided by KLZ4F_compressBound().
 *  If this condition is not respected, KLZ4F_compress() will fail (result is an errorCode).
 *  After an error, the state is left in a UB state, and must be re-initialized or freed.
 *  If previously an uncompressed block was written, buffered data is flushed
 *  before appending compressed data is continued.
 *  KLZ4F_compressUpdate() doesn't guarantee error recovery.
 *  When an error occurs, compression context must be freed or resized.
 * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
 * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
 *           or an error code if it fails (which can be tested using KLZ4F_isError())

@@ -355,12 +347,8 @@ typedef struct KLZ4F_dctx_s KLZ4F_dctx;   /* incomplete type */
typedef KLZ4F_dctx* KLZ4F_decompressionContext_t;   /* compatibility with previous API versions */

typedef struct {
  unsigned stableDst;     /* pledges that last 64KB decompressed data will remain available unmodified between invocations.
                           * This optimization skips storage operations in tmp buffers. */
  unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time.
                           * Setting this option to 1 once disables all checksums for the rest of the frame. */
  unsigned reserved1;     /* must be set to zero for forward compatibility */
  unsigned reserved0;     /* idem */
  unsigned stableDst;    /* pledges that last 64KB decompressed data will remain available unmodified. This optimization skips storage operations in tmp buffers. */
  unsigned reserved[3];  /* must be set to zero for forward compatibility */
} KLZ4F_decompressOptions_t;


@@ -368,10 +356,9 @@ typedef struct {

/*! KLZ4F_createDecompressionContext() :
 *  Create an KLZ4F_dctx object, to track all decompression operations.
 *  @version provided MUST be KLZ4F_VERSION.
 *  @dctxPtr MUST be valid.
 *  The function fills @dctxPtr with the value of a pointer to an allocated and initialized KLZ4F_dctx object.
 *  The @return is an errorCode, which can be tested using KLZ4F_isError().
 *  The version provided MUST be KLZ4F_VERSION.
 *  The function provides a pointer to an allocated and initialized KLZ4F_dctx object.
 *  The result is an errorCode, which can be tested using KLZ4F_isError().
 *  dctx memory can be released using KLZ4F_freeDecompressionContext();
 *  Result of KLZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released.
 *  That is, it should be == 0 if decompression has been completed fully and correctly.

@@ -384,8 +371,6 @@ KLZ4FLIB_API KLZ4F_errorCode_t KLZ4F_freeDecompressionContext(KLZ4F_dctx* dctx);
 *  Streaming decompression functions
 *************************************/

#define KLZ4F_MAGICNUMBER 0x184D2204U
#define KLZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
#define KLZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5

/*! KLZ4F_headerSize() : v1.9.0+

@@ -401,7 +386,7 @@ KLZ4FLIB_API size_t KLZ4F_headerSize(const void* src, size_t srcSize);

/*! KLZ4F_getFrameInfo() :
 *  This function extracts frame parameters (max blockSize, dictID, etc.).
 *  Its usage is optional: user can also invoke KLZ4F_decompress() directly.
 *  Its usage is optional: user can call KLZ4F_decompress() directly.
 *
 *  Extracted information will fill an existing KLZ4F_frameInfo_t structure.
 *  This can be useful for allocation and dictionary identification purposes.

@@ -442,10 +427,9 @@ KLZ4FLIB_API size_t KLZ4F_headerSize(const void* src, size_t srcSize);
 *  note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely.
 *  note 2 : frame parameters are *copied into* an already allocated KLZ4F_frameInfo_t structure.
 */
KLZ4FLIB_API size_t
KLZ4F_getFrameInfo(KLZ4F_dctx* dctx,
                   KLZ4F_frameInfo_t* frameInfoPtr,
                   const void* srcBuffer, size_t* srcSizePtr);
KLZ4FLIB_API size_t KLZ4F_getFrameInfo(KLZ4F_dctx* dctx,
                                       KLZ4F_frameInfo_t* frameInfoPtr,
                                       const void* srcBuffer, size_t* srcSizePtr);

/*! KLZ4F_decompress() :
 *  Call this function repetitively to regenerate data compressed in `srcBuffer`.

@@ -478,11 +462,10 @@ KLZ4F_getFrameInfo(KLZ4F_dctx* dctx,
 *
 *  After a frame is fully decoded, dctx can be used again to decompress another frame.
 */
KLZ4FLIB_API size_t
KLZ4F_decompress(KLZ4F_dctx* dctx,
                 void* dstBuffer, size_t* dstSizePtr,
                 const void* srcBuffer, size_t* srcSizePtr,
                 const KLZ4F_decompressOptions_t* dOptPtr);
KLZ4FLIB_API size_t KLZ4F_decompress(KLZ4F_dctx* dctx,
                                     void* dstBuffer, size_t* dstSizePtr,
                                     const void* srcBuffer, size_t* srcSizePtr,
                                     const KLZ4F_decompressOptions_t* dOptPtr);


/*! KLZ4F_resetDecompressionContext() : added in v1.8.0

@@ -546,8 +529,6 @@ extern "C" {
  ITEM(ERROR_headerChecksum_invalid) \
  ITEM(ERROR_contentChecksum_invalid) \
  ITEM(ERROR_frameDecoding_alreadyStarted) \
  ITEM(ERROR_compressionState_uninitialized) \
  ITEM(ERROR_parameter_null) \
  ITEM(ERROR_maxCode)

#define KLZ4F_GENERATE_ENUM(ENUM) KLZ4F_##ENUM,

@@ -558,31 +539,7 @@ typedef enum { KLZ4F_LIST_ERRORS(KLZ4F_GENERATE_ENUM)

KLZ4FLIB_STATIC_API KLZ4F_errorCodes KLZ4F_getErrorCode(size_t functionResult);


/*! KLZ4F_getBlockSize() :
 *  Return, in scalar format (size_t),
 *  the maximum block size associated with blockSizeID.
**/
KLZ4FLIB_STATIC_API size_t KLZ4F_getBlockSize(KLZ4F_blockSizeID_t blockSizeID);

/*! KLZ4F_uncompressedUpdate() :
 *  KLZ4F_uncompressedUpdate() can be called repetitively to add as much data uncompressed data as necessary.
 *  Important rule: dstCapacity MUST be large enough to store the entire source buffer as
 *  no compression is done for this operation
 *  If this condition is not respected, KLZ4F_uncompressedUpdate() will fail (result is an errorCode).
 *  After an error, the state is left in a UB state, and must be re-initialized or freed.
 *  If previously a compressed block was written, buffered data is flushed
 *  before appending uncompressed data is continued.
 *  This is only supported when KLZ4F_blockIndependent is used
 * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
 * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
 *           or an error code if it fails (which can be tested using KLZ4F_isError())
 */
KLZ4FLIB_STATIC_API size_t
KLZ4F_uncompressedUpdate(KLZ4F_cctx* cctx,
                         void* dstBuffer, size_t dstCapacity,
                         const void* srcBuffer, size_t srcSize,
                         const KLZ4F_compressOptions_t* cOptPtr);
KLZ4FLIB_STATIC_API size_t KLZ4F_getBlockSize(unsigned);

/**********************************
 *  Bulk processing dictionary API

@@ -626,12 +583,12 @@ KLZ4FLIB_STATIC_API void KLZ4F_freeCDict(KLZ4F_CDict* CDict);
 *  but it's not recommended, as it's the only way to provide dictID in the frame header.
 * @return : number of bytes written into dstBuffer.
 *           or an error code if it fails (can be tested using KLZ4F_isError()) */
KLZ4FLIB_STATIC_API size_t
KLZ4F_compressFrame_usingCDict(KLZ4F_cctx* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               const KLZ4F_CDict* cdict,
                               const KLZ4F_preferences_t* preferencesPtr);
KLZ4FLIB_STATIC_API size_t KLZ4F_compressFrame_usingCDict(
    KLZ4F_cctx* cctx,
    void* dst, size_t dstCapacity,
    const void* src, size_t srcSize,
    const KLZ4F_CDict* cdict,
    const KLZ4F_preferences_t* preferencesPtr);


/*! KLZ4F_compressBegin_usingCDict() :

@@ -641,49 +598,23 @@ KLZ4F_compressFrame_usingCDict(KLZ4F_cctx* cctx,
 *  however, it's the only way to provide dictID in the frame header.
 * @return : number of bytes written into dstBuffer for the header,
 *           or an error code (which can be tested using KLZ4F_isError()) */
KLZ4FLIB_STATIC_API size_t
KLZ4F_compressBegin_usingCDict(KLZ4F_cctx* cctx,
                               void* dstBuffer, size_t dstCapacity,
                               const KLZ4F_CDict* cdict,
                               const KLZ4F_preferences_t* prefsPtr);
KLZ4FLIB_STATIC_API size_t KLZ4F_compressBegin_usingCDict(
    KLZ4F_cctx* cctx,
    void* dstBuffer, size_t dstCapacity,
    const KLZ4F_CDict* cdict,
    const KLZ4F_preferences_t* prefsPtr);


/*! KLZ4F_decompress_usingDict() :
 *  Same as KLZ4F_decompress(), using a predefined dictionary.
 *  Dictionary is used "in place", without any preprocessing.
** It must remain accessible throughout the entire frame decoding. */
KLZ4FLIB_STATIC_API size_t
KLZ4F_decompress_usingDict(KLZ4F_dctx* dctxPtr,
                           void* dstBuffer, size_t* dstSizePtr,
                           const void* srcBuffer, size_t* srcSizePtr,
                           const void* dict, size_t dictSize,
                           const KLZ4F_decompressOptions_t* decompressOptionsPtr);


/*! Custom memory allocation :
 *  These prototypes make it possible to pass custom allocation/free functions.
 *  KLZ4F_customMem is provided at state creation time, using KLZ4F_create*_advanced() listed below.
 *  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
 */
typedef void* (*KLZ4F_AllocFunction) (void* opaqueState, size_t size);
typedef void* (*KLZ4F_CallocFunction) (void* opaqueState, size_t size);
typedef void  (*KLZ4F_FreeFunction) (void* opaqueState, void* address);
typedef struct {
    KLZ4F_AllocFunction customAlloc;
    KLZ4F_CallocFunction customCalloc; /* optional; when not defined, uses customAlloc + memset */
    KLZ4F_FreeFunction customFree;
    void* opaqueState;
} KLZ4F_CustomMem;
static
#ifdef __GNUC__
__attribute__((__unused__))
#endif
KLZ4F_CustomMem const KLZ4F_defaultCMem = { NULL, NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */

KLZ4FLIB_STATIC_API KLZ4F_cctx* KLZ4F_createCompressionContext_advanced(KLZ4F_CustomMem customMem, unsigned version);
KLZ4FLIB_STATIC_API KLZ4F_dctx* KLZ4F_createDecompressionContext_advanced(KLZ4F_CustomMem customMem, unsigned version);
KLZ4FLIB_STATIC_API KLZ4F_CDict* KLZ4F_createCDict_advanced(KLZ4F_CustomMem customMem, const void* dictBuffer, size_t dictSize);

 * It must remain accessible throughout the entire frame decoding. */
KLZ4FLIB_STATIC_API size_t KLZ4F_decompress_usingDict(
    KLZ4F_dctx* dctxPtr,
    void* dstBuffer, size_t* dstSizePtr,
    const void* srcBuffer, size_t* srcSizePtr,
    const void* dict, size_t dictSize,
    const KLZ4F_decompressOptions_t* decompressOptionsPtr);

#if defined (__cplusplus)
}
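The custom-memory hooks being removed above can be wired up as in the following hedged sketch, e.g. to route KLZ4F allocations through an application arena or allocation tracker. The `"lz4frame_static.h"` include path and the function names `my_alloc`/`my_free`/`make_tracked_dctx` are illustrative assumptions.

```c
/* Hedged sketch of the KLZ4F_CustomMem hooks declared above;
 * illustrative only, not part of this commit. */
#include <stdlib.h>
#include "lz4frame_static.h"

static void *my_alloc(void *opaque, size_t size) {
    (void)opaque; /* an arena or stats pointer could live here */
    return malloc(size);
}

static void my_free(void *opaque, void *address) {
    (void)opaque;
    free(address);
}

KLZ4F_dctx *make_tracked_dctx(void) {
    /* customCalloc may stay NULL: KLZ4F then uses customAlloc + memset */
    KLZ4F_CustomMem const cmem = { my_alloc, NULL, my_free, NULL };
    return KLZ4F_createDecompressionContext_advanced(cmem, KLZ4F_VERSION);
}
```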
@@ -1,7 +1,7 @@
/*
    KLZ4 auto-framing library
    Header File for static linking only
    Copyright (C) 2011-2020, Yann Collet.
    Copyright (C) 2011-2016, Yann Collet.

    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -1,6 +1,6 @@
/*
    KLZ4 HC - High Compression Mode of KLZ4
    Copyright (C) 2011-2020, Yann Collet.
    Copyright (C) 2011-2017, Yann Collet.

    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

@@ -42,7 +42,7 @@
 * Select how default compression function will allocate workplace memory,
 * in stack (0:fastest), or in heap (1:requires malloc()).
 * Since workplace is rather large, heap mode is recommended.
 **/
 */
#ifndef KLZ4HC_HEAPMODE
#  define KLZ4HC_HEAPMODE 1
#endif
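KLZ4HC_HEAPMODE above is a build-time switch; the following is a hedged illustration of how it is typically overridden at compile time (the compiler invocations are assumptions, not taken from this repository's build).

```c
/* Hedged illustration of the KLZ4HC_HEAPMODE switch above:
 *   cc -DKLZ4HC_HEAPMODE=0 -c lz4hc.c   -- workspace on the stack (fastest)
 *   cc -DKLZ4HC_HEAPMODE=1 -c lz4hc.c   -- workspace via malloc() (default,
 *                                          recommended: the workspace is large)
 */
```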
@ -99,20 +99,18 @@ static void KLZ4HC_clearTables (KLZ4HC_CCtx_internal* hc4)
|
|||
|
||||
static void KLZ4HC_init_internal (KLZ4HC_CCtx_internal* hc4, const BYTE* start)
|
||||
{
|
||||
size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart);
|
||||
size_t newStartingOffset = bufferSize + hc4->dictLimit;
|
||||
assert(newStartingOffset >= bufferSize); /* check overflow */
|
||||
if (newStartingOffset > 1 GB) {
|
||||
uptrval startingOffset = (uptrval)(hc4->end - hc4->base);
|
||||
if (startingOffset > 1 GB) {
|
||||
KLZ4HC_clearTables(hc4);
|
||||
newStartingOffset = 0;
|
||||
startingOffset = 0;
|
||||
}
|
||||
newStartingOffset += 64 KB;
|
||||
hc4->nextToUpdate = (U32)newStartingOffset;
|
||||
hc4->prefixStart = start;
|
||||
startingOffset += 64 KB;
|
||||
hc4->nextToUpdate = (U32) startingOffset;
|
||||
hc4->base = start - startingOffset;
|
||||
hc4->end = start;
|
||||
hc4->dictStart = start;
|
||||
hc4->dictLimit = (U32)newStartingOffset;
|
||||
hc4->lowLimit = (U32)newStartingOffset;
|
||||
hc4->dictBase = start - startingOffset;
|
||||
hc4->dictLimit = (U32) startingOffset;
|
||||
hc4->lowLimit = (U32) startingOffset;
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -121,15 +119,12 @@ KLZ4_FORCE_INLINE void KLZ4HC_Insert (KLZ4HC_CCtx_internal* hc4, const BYTE* ip)
|
|||
{
|
||||
U16* const chainTable = hc4->chainTable;
|
||||
U32* const hashTable = hc4->hashTable;
|
||||
const BYTE* const prefixPtr = hc4->prefixStart;
|
||||
U32 const prefixIdx = hc4->dictLimit;
|
||||
U32 const target = (U32)(ip - prefixPtr) + prefixIdx;
|
||||
const BYTE* const base = hc4->base;
|
||||
U32 const target = (U32)(ip - base);
|
||||
U32 idx = hc4->nextToUpdate;
|
||||
assert(ip >= prefixPtr);
|
||||
assert(target >= prefixIdx);
|
||||
|
||||
while (idx < target) {
|
||||
U32 const h = KLZ4HC_hashPtr(prefixPtr+idx-prefixIdx);
|
||||
U32 const h = KLZ4HC_hashPtr(base+idx);
|
||||
size_t delta = idx - hashTable[h];
|
||||
if (delta>KLZ4_DISTANCE_MAX) delta = KLZ4_DISTANCE_MAX;
|
||||
DELTANEXTU16(chainTable, idx) = (U16)delta;
|
||||
|
|
@ -198,14 +193,15 @@ KLZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
|
|||
BYTE const byte = (BYTE)(pattern >> bitOffset);
|
||||
if (*ip != byte) break;
|
||||
ip ++; bitOffset -= 8;
|
||||
} }
|
||||
}
|
||||
}
|
||||
|
||||
return (unsigned)(ip - iStart);
|
||||
}
|
||||
|
||||
/* KLZ4HC_reverseCountPattern() :
|
||||
* pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
|
||||
* read using natural platform endianness */
|
||||
* read using natural platform endianess */
|
||||
static unsigned
|
||||
KLZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
|
||||
{
|
||||
|
|
@ -215,7 +211,7 @@ KLZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
|
|||
if (KLZ4_read32(ip-4) != pattern) break;
|
||||
ip -= 4;
|
||||
}
|
||||
{ const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */
|
||||
{ const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianess */
|
||||
while (likely(ip>iLow)) {
|
||||
if (ip[-1] != *bytePtr) break;
|
||||
ip--; bytePtr--;
|
||||
|
|
@ -238,28 +234,28 @@ typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
|
|||
|
||||
KLZ4_FORCE_INLINE int
|
||||
KLZ4HC_InsertAndGetWiderMatch (
|
||||
KLZ4HC_CCtx_internal* const hc4,
|
||||
const BYTE* const ip,
|
||||
const BYTE* const iLowLimit, const BYTE* const iHighLimit,
|
||||
int longest,
|
||||
const BYTE** matchpos,
|
||||
const BYTE** startpos,
|
||||
const int maxNbAttempts,
|
||||
const int patternAnalysis, const int chainSwap,
|
||||
const dictCtx_directive dict,
|
||||
const HCfavor_e favorDecSpeed)
|
||||
KLZ4HC_CCtx_internal* hc4,
|
||||
const BYTE* const ip,
|
||||
const BYTE* const iLowLimit,
|
||||
const BYTE* const iHighLimit,
|
||||
int longest,
|
||||
const BYTE** matchpos,
|
||||
const BYTE** startpos,
|
||||
const int maxNbAttempts,
|
||||
const int patternAnalysis,
|
||||
const int chainSwap,
|
||||
const dictCtx_directive dict,
|
||||
const HCfavor_e favorDecSpeed)
|
||||
{
|
||||
U16* const chainTable = hc4->chainTable;
|
||||
U32* const HashTable = hc4->hashTable;
|
||||
const KLZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
|
||||
const BYTE* const prefixPtr = hc4->prefixStart;
|
||||
const U32 prefixIdx = hc4->dictLimit;
|
||||
const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
|
||||
const int withinStartDistance = (hc4->lowLimit + (KLZ4_DISTANCE_MAX + 1) > ipIndex);
|
||||
const U32 lowestMatchIndex = (withinStartDistance) ? hc4->lowLimit : ipIndex - KLZ4_DISTANCE_MAX;
|
||||
const BYTE* const dictStart = hc4->dictStart;
|
||||
const U32 dictIdx = hc4->lowLimit;
|
||||
const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx;
|
||||
const BYTE* const base = hc4->base;
|
||||
const U32 dictLimit = hc4->dictLimit;
|
||||
const BYTE* const lowPrefixPtr = base + dictLimit;
|
||||
const U32 ipIndex = (U32)(ip - base);
|
||||
const U32 lowestMatchIndex = (hc4->lowLimit + (KLZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - KLZ4_DISTANCE_MAX;
|
||||
const BYTE* const dictBase = hc4->dictBase;
|
||||
int const lookBackLength = (int)(ip-iLowLimit);
|
||||
int nbAttempts = maxNbAttempts;
|
||||
U32 matchChainPos = 0;
|
||||
|
|
@ -281,13 +277,14 @@ KLZ4HC_InsertAndGetWiderMatch (
|
|||
assert(matchIndex < ipIndex);
|
||||
if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
|
||||
/* do nothing */
|
||||
} else if (matchIndex >= prefixIdx) { /* within current Prefix */
|
||||
const BYTE* const matchPtr = prefixPtr + matchIndex - prefixIdx;
|
||||
} else if (matchIndex >= dictLimit) { /* within current Prefix */
|
||||
const BYTE* const matchPtr = base + matchIndex;
|
||||
assert(matchPtr >= lowPrefixPtr);
|
||||
assert(matchPtr < ip);
|
||||
assert(longest >= 1);
|
||||
if (KLZ4_read16(iLowLimit + longest - 1) == KLZ4_read16(matchPtr - lookBackLength + longest - 1)) {
|
||||
if (KLZ4_read32(matchPtr) == pattern) {
|
||||
int const back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0;
|
||||
int const back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr) : 0;
|
||||
matchLength = MINMATCH + (int)KLZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
|
||||
matchLength -= back;
|
||||
if (matchLength > longest) {
|
||||
|
|
@@ -296,25 +293,24 @@ KLZ4HC_InsertAndGetWiderMatch (
*startpos = ip + back;
} } }
} else { /* lowestMatchIndex <= matchIndex < dictLimit */
const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx);
assert(matchIndex >= dictIdx);
if ( likely(matchIndex <= prefixIdx - 4)
&& (KLZ4_read32(matchPtr) == pattern) ) {
const BYTE* const matchPtr = dictBase + matchIndex;
if (KLZ4_read32(matchPtr) == pattern) {
const BYTE* const dictStart = dictBase + hc4->lowLimit;
int back = 0;
const BYTE* vLimit = ip + (prefixIdx - matchIndex);
const BYTE* vLimit = ip + (dictLimit - matchIndex);
if (vLimit > iHighLimit) vLimit = iHighLimit;
matchLength = (int)KLZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
matchLength += KLZ4_count(ip+matchLength, prefixPtr, iHighLimit);
matchLength += KLZ4_count(ip+matchLength, lowPrefixPtr, iHighLimit);
back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
matchLength -= back;
if (matchLength > longest) {
longest = matchLength;
*matchpos = prefixPtr - prefixIdx + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
*matchpos = base + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
*startpos = ip + back;
} } }

if (chainSwap && matchLength==longest) { /* better match => select a better chain */
if (chainSwap && matchLength==longest) { /* better match => select a better chain */
assert(lookBackLength==0); /* search forward only */
if (matchIndex + (U32)longest <= ipIndex) {
int const kTrigger = 4;
@@ -330,7 +326,8 @@ KLZ4HC_InsertAndGetWiderMatch (
distanceToNextMatch = candidateDist;
matchChainPos = (U32)pos;
accel = 1 << kTrigger;
} }
}
}
if (distanceToNextMatch > 1) {
if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
matchIndex -= distanceToNextMatch;
@@ -350,24 +347,23 @@ KLZ4HC_InsertAndGetWiderMatch (
repeat = rep_not;
} }
if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
&& KLZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) {
const int extDict = matchCandidateIdx < prefixIdx;
const BYTE* const matchPtr = (extDict ? dictStart - dictIdx : prefixPtr - prefixIdx) + matchCandidateIdx;
&& KLZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
const int extDict = matchCandidateIdx < dictLimit;
const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
if (KLZ4_read32(matchPtr) == pattern) { /* good candidate */
const BYTE* const iLimit = extDict ? dictEnd : iHighLimit;
const BYTE* const dictStart = dictBase + hc4->lowLimit;
const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
size_t forwardPatternLength = KLZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
if (extDict && matchPtr + forwardPatternLength == iLimit) {
U32 const rotatedPattern = KLZ4HC_rotatePattern(forwardPatternLength, pattern);
forwardPatternLength += KLZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern);
forwardPatternLength += KLZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
}
{ const BYTE* const lowestMatchPtr = extDict ? dictStart : prefixPtr;
{ const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
size_t backLength = KLZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
size_t currentSegmentLength;
if (!extDict
&& matchPtr - backLength == prefixPtr
&& dictIdx < prefixIdx) {
if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
U32 const rotatedPattern = KLZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
backLength += KLZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern);
backLength += KLZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
}
/* Limit backLength not go further than lowestMatchIndex */
backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
@@ -377,28 +373,28 @@ KLZ4HC_InsertAndGetWiderMatch (
if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
&& (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
if (KLZ4HC_protectDictEnd(prefixIdx, newMatchIndex))
if (KLZ4HC_protectDictEnd(dictLimit, newMatchIndex))
matchIndex = newMatchIndex;
else {
/* Can only happen if started in the prefix */
assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
matchIndex = prefixIdx;
assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
matchIndex = dictLimit;
}
} else {
U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
if (!KLZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) {
assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
matchIndex = prefixIdx;
if (!KLZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
matchIndex = dictLimit;
} else {
matchIndex = newMatchIndex;
if (lookBackLength==0) { /* no back possible */
size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
if ((size_t)longest < maxML) {
assert(prefixPtr - prefixIdx + matchIndex != ip);
if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > KLZ4_DISTANCE_MAX) break;
assert(base + matchIndex != ip);
if ((size_t)(ip - base) - matchIndex > KLZ4_DISTANCE_MAX) break;
assert(maxML < 2 GB);
longest = (int)maxML;
*matchpos = prefixPtr - prefixIdx + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
*matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
*startpos = ip;
}
{ U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
@@ -417,12 +413,12 @@ KLZ4HC_InsertAndGetWiderMatch (
if ( dict == usingDictCtxHc
&& nbAttempts > 0
&& ipIndex - lowestMatchIndex < KLZ4_DISTANCE_MAX) {
size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
U32 dictMatchIndex = dictCtx->hashTable[KLZ4HC_hashPtr(ip)];
assert(dictEndOffset <= 1 GB);
matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
while (ipIndex - matchIndex <= KLZ4_DISTANCE_MAX && nbAttempts--) {
const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex;
const BYTE* const matchPtr = dictCtx->base + dictMatchIndex;

if (KLZ4_read32(matchPtr) == pattern) {
int mlt;
@@ -430,11 +426,11 @@ KLZ4HC_InsertAndGetWiderMatch (
const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
if (vLimit > iHighLimit) vLimit = iHighLimit;
mlt = (int)KLZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
back = lookBackLength ? KLZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0;
mlt -= back;
if (mlt > longest) {
longest = mlt;
*matchpos = prefixPtr - prefixIdx + matchIndex + back;
*matchpos = base + matchIndex + back;
*startpos = ip + back;
} }

@@ -446,13 +442,13 @@ KLZ4HC_InsertAndGetWiderMatch (
return longest;
}

KLZ4_FORCE_INLINE int
KLZ4HC_InsertAndFindBestMatch(KLZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
const BYTE* const ip, const BYTE* const iLimit,
const BYTE** matchpos,
const int maxNbAttempts,
const int patternAnalysis,
const dictCtx_directive dict)
KLZ4_FORCE_INLINE
int KLZ4HC_InsertAndFindBestMatch(KLZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
const BYTE* const ip, const BYTE* const iLimit,
const BYTE** matchpos,
const int maxNbAttempts,
const int patternAnalysis,
const dictCtx_directive dict)
{
const BYTE* uselessPtr = ip;
/* note : KLZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
@@ -755,7 +751,7 @@ _last_literals:
} else {
*op++ = (BYTE)(lastRunSize << ML_BITS);
}
KLZ4_memcpy(op, anchor, lastRunSize);
memcpy(op, anchor, lastRunSize);
op += lastRunSize;
}

@@ -888,13 +884,13 @@ KLZ4HC_compress_generic_dictCtx (
limitedOutput_directive limit
)
{
const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit);
const size_t position = (size_t)(ctx->end - ctx->base) - ctx->lowLimit;
assert(ctx->dictCtx != NULL);
if (position >= 64 KB) {
ctx->dictCtx = NULL;
return KLZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
} else if (position == 0 && *srcSizePtr > 4 KB) {
KLZ4_memcpy(ctx, ctx->dictCtx, sizeof(KLZ4HC_CCtx_internal));
memcpy(ctx, ctx->dictCtx, sizeof(KLZ4HC_CCtx_internal));
KLZ4HC_setExternalDict(ctx, (const BYTE *)src);
ctx->compressionLevel = (short)cLevel;
return KLZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
@@ -957,15 +953,13 @@ int KLZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int sr

int KLZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
int cSize;
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
KLZ4_streamHC_t* const statePtr = (KLZ4_streamHC_t*)ALLOC(sizeof(KLZ4_streamHC_t));
if (statePtr==NULL) return 0;
#else
KLZ4_streamHC_t state;
KLZ4_streamHC_t* const statePtr = &state;
#endif
cSize = KLZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
int const cSize = KLZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
FREEMEM(statePtr);
#endif
@@ -988,7 +982,6 @@ int KLZ4_compress_HC_destSize(void* state, const char* source, char* dest, int*
* Streaming Functions
**************************************/
/* allocation */
#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
KLZ4_streamHC_t* KLZ4_createStreamHC(void)
{
KLZ4_streamHC_t* const state =
@@ -1005,12 +998,13 @@ int KLZ4_freeStreamHC (KLZ4_streamHC_t* KLZ4_streamHCPtr)
FREEMEM(KLZ4_streamHCPtr);
return 0;
}
#endif


KLZ4_streamHC_t* KLZ4_initStreamHC (void* buffer, size_t size)
{
KLZ4_streamHC_t* const KLZ4_streamHCPtr = (KLZ4_streamHC_t*)buffer;
/* if compilation fails here, KLZ4_STREAMHCSIZE must be increased */
KLZ4_STATIC_ASSERT(sizeof(KLZ4HC_CCtx_internal) <= KLZ4_STREAMHCSIZE);
DEBUGLOG(4, "KLZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
/* check conditions */
if (buffer == NULL) return NULL;
@@ -1036,13 +1030,9 @@ void KLZ4_resetStreamHC_fast (KLZ4_streamHC_t* KLZ4_streamHCPtr, int compression
if (KLZ4_streamHCPtr->internal_donotuse.dirty) {
KLZ4_initStreamHC(KLZ4_streamHCPtr, sizeof(*KLZ4_streamHCPtr));
} else {
/* preserve end - prefixStart : can trigger clearTable's threshold */
if (KLZ4_streamHCPtr->internal_donotuse.end != NULL) {
KLZ4_streamHCPtr->internal_donotuse.end -= (uptrval)KLZ4_streamHCPtr->internal_donotuse.prefixStart;
} else {
assert(KLZ4_streamHCPtr->internal_donotuse.prefixStart == NULL);
}
KLZ4_streamHCPtr->internal_donotuse.prefixStart = NULL;
/* preserve end - base : can trigger clearTable's threshold */
KLZ4_streamHCPtr->internal_donotuse.end -= (uptrval)KLZ4_streamHCPtr->internal_donotuse.base;
KLZ4_streamHCPtr->internal_donotuse.base = NULL;
KLZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
}
KLZ4_setCompressionLevel(KLZ4_streamHCPtr, compressionLevel);
@@ -1093,14 +1083,14 @@ void KLZ4_attach_HC_dictionary(KLZ4_streamHC_t *working_stream, const KLZ4_strea
static void KLZ4HC_setExternalDict(KLZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
DEBUGLOG(4, "KLZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
if (ctxPtr->end >= ctxPtr->prefixStart + 4)
if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4)
KLZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */

/* Only one memory segment for extDict, so any previous extDict is lost at this stage */
ctxPtr->lowLimit = ctxPtr->dictLimit;
ctxPtr->dictStart = ctxPtr->prefixStart;
ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart);
ctxPtr->prefixStart = newBlock;
ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
ctxPtr->dictBase = ctxPtr->base;
ctxPtr->base = newBlock - ctxPtr->dictLimit;
ctxPtr->end = newBlock;
ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */
@@ -1119,11 +1109,11 @@ KLZ4_compressHC_continue_generic (KLZ4_streamHC_t* KLZ4_streamHCPtr,
KLZ4_streamHCPtr, src, *srcSizePtr, limit);
assert(ctxPtr != NULL);
/* auto-init if forgotten */
if (ctxPtr->prefixStart == NULL) KLZ4HC_init_internal (ctxPtr, (const BYTE*) src);
if (ctxPtr->base == NULL) KLZ4HC_init_internal (ctxPtr, (const BYTE*) src);

/* Check overflow */
if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) {
size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart);
if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
if (dictSize > 64 KB) dictSize = 64 KB;
KLZ4_loadDictHC(KLZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
}
@@ -1134,16 +1124,13 @@ KLZ4_compressHC_continue_generic (KLZ4_streamHC_t* KLZ4_streamHCPtr,

/* Check overlapping input/dictionary space */
{ const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
const BYTE* const dictBegin = ctxPtr->dictStart;
const BYTE* const dictEnd = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit);
const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
const BYTE* const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
if (sourceEnd > dictEnd) sourceEnd = dictEnd;
ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart);
ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart);
if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) {
ctxPtr->lowLimit = ctxPtr->dictLimit;
ctxPtr->dictStart = ctxPtr->prefixStart;
} } }
ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
} }

return KLZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}
@@ -1171,7 +1158,7 @@ int KLZ4_compress_HC_continue_destSize (KLZ4_streamHC_t* KLZ4_streamHCPtr, const
int KLZ4_saveDictHC (KLZ4_streamHC_t* KLZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
KLZ4HC_CCtx_internal* const streamPtr = &KLZ4_streamHCPtr->internal_donotuse;
int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart);
int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
DEBUGLOG(5, "KLZ4_saveDictHC(%p, %p, %d)", KLZ4_streamHCPtr, safeBuffer, dictSize);
assert(prefixSize >= 0);
if (dictSize > 64 KB) dictSize = 64 KB;
@@ -1179,13 +1166,12 @@ int KLZ4_saveDictHC (KLZ4_streamHC_t* KLZ4_streamHCPtr, char* safeBuffer, int di
if (dictSize > prefixSize) dictSize = prefixSize;
if (safeBuffer == NULL) assert(dictSize == 0);
if (dictSize > 0)
KLZ4_memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
{ U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit;
memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
{ U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
streamPtr->end = (const BYTE*)safeBuffer + dictSize;
streamPtr->prefixStart = streamPtr->end - dictSize;
streamPtr->base = streamPtr->end - endIndex;
streamPtr->dictLimit = endIndex - (U32)dictSize;
streamPtr->lowLimit = endIndex - (U32)dictSize;
streamPtr->dictStart = streamPtr->prefixStart;
if (streamPtr->nextToUpdate < streamPtr->dictLimit)
streamPtr->nextToUpdate = streamPtr->dictLimit;
}
@@ -1213,7 +1199,7 @@ int KLZ4_compressHC_limitedOutput_continue (KLZ4_streamHC_t* ctx, const char* sr


/* Deprecated streaming functions */
int KLZ4_sizeofStreamStateHC(void) { return sizeof(KLZ4_streamHC_t); }
int KLZ4_sizeofStreamStateHC(void) { return KLZ4_STREAMHCSIZE; }

/* state is presumed correctly sized, aka >= sizeof(KLZ4_streamHC_t)
* @return : 0 on success, !=0 if error */
@@ -1225,7 +1211,6 @@ int KLZ4_resetStreamStateHC(void* state, char* inputBuffer)
return 0;
}

#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
void* KLZ4_createHC (const char* inputBuffer)
{
KLZ4_streamHC_t* const hc4 = KLZ4_createStreamHC();
@@ -1240,7 +1225,6 @@ int KLZ4_freeHC (void* KLZ4HC_Data)
FREEMEM(KLZ4HC_Data);
return 0;
}
#endif

int KLZ4_compressHC2_continue (void* KLZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
{
@@ -1254,11 +1238,11 @@ int KLZ4_compressHC2_limitedOutput_continue (void* KLZ4HC_Data, const char* src,

char* KLZ4_slideInputBufferHC(void* KLZ4HC_Data)
{
KLZ4_streamHC_t* const ctx = (KLZ4_streamHC_t*)KLZ4HC_Data;
const BYTE* bufferStart = ctx->internal_donotuse.prefixStart - ctx->internal_donotuse.dictLimit + ctx->internal_donotuse.lowLimit;
KLZ4_streamHC_t *ctx = (KLZ4_streamHC_t*)KLZ4HC_Data;
const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
KLZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
/* avoid const char * -> char * conversion warning :( */
return (char*)(uptrval)bufferStart;
return (char *)(uptrval)bufferStart;
}

@@ -1341,7 +1325,7 @@ static int KLZ4HC_compress_optimal ( KLZ4HC_CCtx_internal* ctx,
{
int retval = 0;
#define TRAILING_LITERALS 3
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
#ifdef KLZ4HC_HEAPMODE
KLZ4HC_optimal_t* const opt = (KLZ4HC_optimal_t*)ALLOC(sizeof(KLZ4HC_optimal_t) * (KLZ4_OPT_NUM + TRAILING_LITERALS));
#else
KLZ4HC_optimal_t opt[KLZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
@@ -1359,7 +1343,7 @@ static int KLZ4HC_compress_optimal ( KLZ4HC_CCtx_internal* ctx,
const BYTE* ovref = NULL;

/* init */
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
#ifdef KLZ4HC_HEAPMODE
if (opt == NULL) goto _return_label;
#endif
DEBUGLOG(5, "KLZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
@@ -1591,7 +1575,7 @@ _last_literals:
} else {
*op++ = (BYTE)(lastRunSize << ML_BITS);
}
KLZ4_memcpy(op, anchor, lastRunSize);
memcpy(op, anchor, lastRunSize);
op += lastRunSize;
}

@@ -1624,7 +1608,7 @@ if (limit == fillOutput) {
goto _last_literals;
}
_return_label:
#if defined(KLZ4HC_HEAPMODE) && KLZ4HC_HEAPMODE==1
#ifdef KLZ4HC_HEAPMODE
FREEMEM(opt);
#endif
return retval;
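The hunks above roll the vendored KLZ4 HC compressor back from the newer prefixStart/dictStart indexing to the older base/dictBase scheme (and from the KLZ4_memcpy/KLZ4_memmove wrappers back to plain memcpy/memmove); the public one-shot entry point KLZ4_compress_HC() keeps the same signature on both sides of the revert. For orientation, a minimal, hypothetical sketch of driving that entry point follows; the buffer size, input string, and compression level are illustrative assumptions, not part of the diff:

#include <stdio.h>
#include "lz4hc.h" /* vendored header; declares the KLZ4_-prefixed API */

int main(void)
{
    const char src[] = "abcabcabcabcabcabcabcabcabcabc"; /* compressible input (illustrative) */
    char dst[128];                                       /* illustrative capacity */
    /* Returns the compressed size, or 0 on failure (e.g. allocation failure in heap mode). */
    int const cSize = KLZ4_compress_HC(src, dst, (int)sizeof(src), (int)sizeof(dst), 9);
    if (cSize == 0) { fprintf(stderr, "KLZ4_compress_HC failed\n"); return 1; }
    printf("compressed %u -> %d bytes\n", (unsigned)sizeof(src), cSize);
    return 0;
}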
@@ -1,7 +1,7 @@
/*
KLZ4 HC - High Compression Mode of KLZ4
Header File
Copyright (C) 2011-2020, Yann Collet.
Copyright (C) 2011-2017, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

Redistribution and use in source and binary forms, with or without
@@ -198,17 +198,14 @@ KLZ4LIB_API int KLZ4_saveDictHC (KLZ4_streamHC_t* streamHCPtr, char* safeBuffer,
#define KLZ4HC_HASH_MASK (KLZ4HC_HASHTABLESIZE - 1)


/* Never ever use these definitions directly !
* Declare or allocate an KLZ4_streamHC_t instead.
**/
typedef struct KLZ4HC_CCtx_internal KLZ4HC_CCtx_internal;
struct KLZ4HC_CCtx_internal
{
KLZ4_u32 hashTable[KLZ4HC_HASHTABLESIZE];
KLZ4_u16 chainTable[KLZ4HC_MAXD];
const KLZ4_byte* end; /* next block here to continue on current prefix */
const KLZ4_byte* prefixStart; /* Indexes relative to this position */
const KLZ4_byte* dictStart; /* alternate reference for extDict */
const KLZ4_byte* base; /* All index relative to this position */
const KLZ4_byte* dictBase; /* alternate base for extDict */
KLZ4_u32 dictLimit; /* below that point, need extDict */
KLZ4_u32 lowLimit; /* below that point, no more dict */
KLZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
@@ -219,15 +216,20 @@ struct KLZ4HC_CCtx_internal
const KLZ4HC_CCtx_internal* dictCtx;
};

#define KLZ4_STREAMHC_MINSIZE 262200 /* static size, for inter-version compatibility */

/* Do not use these definitions directly !
* Declare or allocate an KLZ4_streamHC_t instead.
*/
#define KLZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
#define KLZ4_STREAMHCSIZE_VOIDP (KLZ4_STREAMHCSIZE / sizeof(void*))
union KLZ4_streamHC_u {
char minStateSize[KLZ4_STREAMHC_MINSIZE];
void* table[KLZ4_STREAMHCSIZE_VOIDP];
KLZ4HC_CCtx_internal internal_donotuse;
}; /* previously typedef'd to KLZ4_streamHC_t */

/* KLZ4_streamHC_t :
* This structure allows static allocation of KLZ4 HC streaming state.
* This can be used to allocate statically on stack, or as part of a larger structure.
* This can be used to allocate statically, on state, or as part of a larger structure.
*
* Such state **must** be initialized using KLZ4_initStreamHC() before first use.
*
@@ -242,7 +244,7 @@ union KLZ4_streamHC_u {
* Required before first use of a statically allocated KLZ4_streamHC_t.
* Before v1.9.0 : use KLZ4_resetStreamHC() instead
*/
KLZ4LIB_API KLZ4_streamHC_t* KLZ4_initStreamHC(void* buffer, size_t size);
KLZ4LIB_API KLZ4_streamHC_t* KLZ4_initStreamHC (void* buffer, size_t size);


/*-************************************
@@ -270,11 +272,9 @@ KLZ4_DEPRECATED("use KLZ4_compress_HC_continue() instead") KLZ4LIB_API int KLZ4_
* KLZ4_slideInputBufferHC() will truncate the history of the stream, rather
* than preserve a window-sized chunk of history.
*/
#if !defined(KLZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
KLZ4_DEPRECATED("use KLZ4_createStreamHC() instead") KLZ4LIB_API void* KLZ4_createHC (const char* inputBuffer);
KLZ4_DEPRECATED("use KLZ4_freeStreamHC() instead") KLZ4LIB_API int KLZ4_freeHC (void* KLZ4HC_Data);
#endif
KLZ4_DEPRECATED("use KLZ4_saveDictHC() instead") KLZ4LIB_API char* KLZ4_slideInputBufferHC (void* KLZ4HC_Data);
KLZ4_DEPRECATED("use KLZ4_freeStreamHC() instead") KLZ4LIB_API int KLZ4_freeHC (void* KLZ4HC_Data);
KLZ4_DEPRECATED("use KLZ4_compress_HC_continue() instead") KLZ4LIB_API int KLZ4_compressHC2_continue (void* KLZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
KLZ4_DEPRECATED("use KLZ4_compress_HC_continue() instead") KLZ4LIB_API int KLZ4_compressHC2_limitedOutput_continue (void* KLZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
KLZ4_DEPRECATED("use KLZ4_createStreamHC() instead") KLZ4LIB_API int KLZ4_sizeofStreamStateHC(void);
@@ -305,7 +305,7 @@ KLZ4LIB_API void KLZ4_resetStreamHC (KLZ4_streamHC_t* streamHCPtr, int compressi
* They should not be linked from DLL,
* as there is no guarantee of API stability yet.
* Prototypes will be promoted to "stable" status
* after successful usage in real-life scenarios.
* after successfull usage in real-life scenarios.
***************************************************/
#ifdef KLZ4_HC_STATIC_LINKING_ONLY /* protection macro */
#ifndef KLZ4_HC_SLO_098092834
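The header hunks above swap KLZ4_STREAMHC_MINSIZE back to the older KLZ4_STREAMHCSIZE constant, but both versions keep the same contract stated in the retained comment: a statically allocated KLZ4_streamHC_t must be initialized with KLZ4_initStreamHC() before first use. A minimal sketch of that pattern, assuming only prototypes visible in this header; the wrapper name hc_state_begin is hypothetical:

#include "lz4hc.h"

static KLZ4_streamHC_t g_hcState; /* static allocation, as the header comment permits */

/* Returns NULL when initialization is rejected (e.g. NULL buffer), mirroring KLZ4_initStreamHC(). */
KLZ4_streamHC_t* hc_state_begin(int compressionLevel)
{
    KLZ4_streamHC_t* const s = KLZ4_initStreamHC(&g_hcState, sizeof(g_hcState));
    if (s != NULL) KLZ4_setCompressionLevel(s, compressionLevel);
    return s;
}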
@@ -1,917 +0,0 @@
/* Common parts of the nanopb library. Most of these are quite low-level
* stuff. For the high-level interface, see pb_encode.h and pb_decode.h.
*/

#ifndef PB_H_INCLUDED
#define PB_H_INCLUDED

/*****************************************************************
* Nanopb compilation time options. You can change these here by *
* uncommenting the lines, or on the compiler command line. *
*****************************************************************/

/* Enable support for dynamically allocated fields */
/* #define PB_ENABLE_MALLOC 1 */

/* Define this if your CPU / compiler combination does not support
* unaligned memory access to packed structures. Note that packed
* structures are only used when requested in .proto options. */
/* #define PB_NO_PACKED_STRUCTS 1 */

/* Increase the number of required fields that are tracked.
* A compiler warning will tell if you need this. */
/* #define PB_MAX_REQUIRED_FIELDS 256 */

/* Add support for tag numbers > 65536 and fields larger than 65536 bytes. */
/* #define PB_FIELD_32BIT 1 */

/* Disable support for error messages in order to save some code space. */
/* #define PB_NO_ERRMSG 1 */

/* Disable support for custom streams (support only memory buffers). */
/* #define PB_BUFFER_ONLY 1 */

/* Disable support for 64-bit datatypes, for compilers without int64_t
or to save some code space. */
/* #define PB_WITHOUT_64BIT 1 */

/* Don't encode scalar arrays as packed. This is only to be used when
* the decoder on the receiving side cannot process packed scalar arrays.
* Such example is older protobuf.js. */
/* #define PB_ENCODE_ARRAYS_UNPACKED 1 */

/* Enable conversion of doubles to floats for platforms that do not
* support 64-bit doubles. Most commonly AVR. */
/* #define PB_CONVERT_DOUBLE_FLOAT 1 */

/* Check whether incoming strings are valid UTF-8 sequences. Slows down
* the string processing slightly and slightly increases code size. */
/* #define PB_VALIDATE_UTF8 1 */

/* This can be defined if the platform is little-endian and has 8-bit bytes.
* Normally it is automatically detected based on __BYTE_ORDER__ macro. */
/* #define PB_LITTLE_ENDIAN_8BIT 1 */

/* Configure static assert mechanism. Instead of changing these, set your
* compiler to C11 standard mode if possible. */
/* #define PB_C99_STATIC_ASSERT 1 */
/* #define PB_NO_STATIC_ASSERT 1 */

/******************************************************************
* You usually don't need to change anything below this line. *
* Feel free to look around and use the defined macros, though. *
******************************************************************/


/* Version of the nanopb library. Just in case you want to check it in
* your own program. */
#define NANOPB_VERSION "nanopb-0.4.8-dev"

/* Include all the system headers needed by nanopb. You will need the
* definitions of the following:
* - strlen, memcpy, memset functions
* - [u]int_least8_t, uint_fast8_t, [u]int_least16_t, [u]int32_t, [u]int64_t
* - size_t
* - bool
*
* If you don't have the standard header files, you can instead provide
* a custom header that defines or includes all this. In that case,
* define PB_SYSTEM_HEADER to the path of this file.
*/
#ifdef PB_SYSTEM_HEADER
#include PB_SYSTEM_HEADER
#else
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>

#ifdef PB_ENABLE_MALLOC
#include <stdlib.h>
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* Macro for defining packed structures (compiler dependent).
* This just reduces memory requirements, but is not required.
*/
#if defined(PB_NO_PACKED_STRUCTS)
/* Disable struct packing */
# define PB_PACKED_STRUCT_START
# define PB_PACKED_STRUCT_END
# define pb_packed
#elif defined(__GNUC__) || defined(__clang__)
/* For GCC and clang */
# define PB_PACKED_STRUCT_START
# define PB_PACKED_STRUCT_END
# define pb_packed __attribute__((packed))
#elif defined(__ICCARM__) || defined(__CC_ARM)
/* For IAR ARM and Keil MDK-ARM compilers */
# define PB_PACKED_STRUCT_START _Pragma("pack(push, 1)")
# define PB_PACKED_STRUCT_END _Pragma("pack(pop)")
# define pb_packed
#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
/* For Microsoft Visual C++ */
# define PB_PACKED_STRUCT_START __pragma(pack(push, 1))
# define PB_PACKED_STRUCT_END __pragma(pack(pop))
# define pb_packed
#else
/* Unknown compiler */
# define PB_PACKED_STRUCT_START
# define PB_PACKED_STRUCT_END
# define pb_packed
#endif

/* Detect endianness */
#ifndef PB_LITTLE_ENDIAN_8BIT
#if ((defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \
(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
defined(__LITTLE_ENDIAN__) || defined(__ARMEL__) || \
defined(__THUMBEL__) || defined(__AARCH64EL__) || defined(_MIPSEL) || \
defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM)) \
&& CHAR_BIT == 8
#define PB_LITTLE_ENDIAN_8BIT 1
#endif
#endif

/* Handly macro for suppressing unreferenced-parameter compiler warnings. */
#ifndef PB_UNUSED
#define PB_UNUSED(x) (void)(x)
#endif

/* Harvard-architecture processors may need special attributes for storing
* field information in program memory. */
#ifndef PB_PROGMEM
#ifdef __AVR__
#include <avr/pgmspace.h>
#define PB_PROGMEM PROGMEM
#define PB_PROGMEM_READU32(x) pgm_read_dword(&x)
#else
#define PB_PROGMEM
#define PB_PROGMEM_READU32(x) (x)
#endif
#endif

/* Compile-time assertion, used for checking compatible compilation options.
* If this does not work properly on your compiler, use
* #define PB_NO_STATIC_ASSERT to disable it.
*
* But before doing that, check carefully the error message / place where it
* comes from to see if the error has a real cause. Unfortunately the error
* message is not always very clear to read, but you can see the reason better
* in the place where the PB_STATIC_ASSERT macro was called.
*/
#ifndef PB_NO_STATIC_ASSERT
# ifndef PB_STATIC_ASSERT
# if defined(__ICCARM__)
/* IAR has static_assert keyword but no _Static_assert */
# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
# elif defined(_MSC_VER) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112)
/* MSVC in C89 mode supports static_assert() keyword anyway */
# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
# elif defined(PB_C99_STATIC_ASSERT)
/* Classic negative-size-array static assert mechanism */
# define PB_STATIC_ASSERT(COND,MSG) typedef char PB_STATIC_ASSERT_MSG(MSG, __LINE__, __COUNTER__)[(COND)?1:-1];
# define PB_STATIC_ASSERT_MSG(MSG, LINE, COUNTER) PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER)
# define PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER) pb_static_assertion_##MSG##_##LINE##_##COUNTER
# elif defined(__cplusplus)
/* C++11 standard static_assert mechanism */
# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
# else
/* C11 standard _Static_assert mechanism */
# define PB_STATIC_ASSERT(COND,MSG) _Static_assert(COND,#MSG);
# endif
# endif
#else
/* Static asserts disabled by PB_NO_STATIC_ASSERT */
# define PB_STATIC_ASSERT(COND,MSG)
#endif

/* Test that PB_STATIC_ASSERT works
* If you get errors here, you may need to do one of these:
* - Enable C11 standard support in your compiler
* - Define PB_C99_STATIC_ASSERT to enable C99 standard support
* - Define PB_NO_STATIC_ASSERT to disable static asserts altogether
*/
PB_STATIC_ASSERT(1, STATIC_ASSERT_IS_NOT_WORKING)
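The deleted header checks its own assert mechanism above with a trivially-true condition. User code can rely on the same macro; a hypothetical example follows (the condition and message token are illustrative, not taken from this file):

#include <pb.h>

/* Fails compilation if pb_size_t is narrower than 16 bits (illustrative check). */
PB_STATIC_ASSERT(sizeof(pb_size_t) >= 2, PB_SIZE_T_IS_TOO_SMALL)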
/* Number of required fields to keep track of. */
#ifndef PB_MAX_REQUIRED_FIELDS
#define PB_MAX_REQUIRED_FIELDS 64
#endif

#if PB_MAX_REQUIRED_FIELDS < 64
#error You should not lower PB_MAX_REQUIRED_FIELDS from the default value (64).
#endif

#ifdef PB_WITHOUT_64BIT
#ifdef PB_CONVERT_DOUBLE_FLOAT
/* Cannot use doubles without 64-bit types */
#undef PB_CONVERT_DOUBLE_FLOAT
#endif
#endif
/* List of possible field types. These are used in the autogenerated code.
* Least-significant 4 bits tell the scalar type
* Most-significant 4 bits specify repeated/required/packed etc.
*/

typedef uint_least8_t pb_type_t;

/**** Field data types ****/

/* Numeric types */
#define PB_LTYPE_BOOL 0x00U /* bool */
#define PB_LTYPE_VARINT 0x01U /* int32, int64, enum, bool */
#define PB_LTYPE_UVARINT 0x02U /* uint32, uint64 */
#define PB_LTYPE_SVARINT 0x03U /* sint32, sint64 */
#define PB_LTYPE_FIXED32 0x04U /* fixed32, sfixed32, float */
#define PB_LTYPE_FIXED64 0x05U /* fixed64, sfixed64, double */

/* Marker for last packable field type. */
#define PB_LTYPE_LAST_PACKABLE 0x05U

/* Byte array with pre-allocated buffer.
* data_size is the length of the allocated PB_BYTES_ARRAY structure. */
#define PB_LTYPE_BYTES 0x06U

/* String with pre-allocated buffer.
* data_size is the maximum length. */
#define PB_LTYPE_STRING 0x07U

/* Submessage
* submsg_fields is pointer to field descriptions */
#define PB_LTYPE_SUBMESSAGE 0x08U

/* Submessage with pre-decoding callback
* The pre-decoding callback is stored as pb_callback_t right before pSize.
* submsg_fields is pointer to field descriptions */
#define PB_LTYPE_SUBMSG_W_CB 0x09U

/* Extension pseudo-field
* The field contains a pointer to pb_extension_t */
#define PB_LTYPE_EXTENSION 0x0AU

/* Byte array with inline, pre-allocated byffer.
* data_size is the length of the inline, allocated buffer.
* This differs from PB_LTYPE_BYTES by defining the element as
* pb_byte_t[data_size] rather than pb_bytes_array_t. */
#define PB_LTYPE_FIXED_LENGTH_BYTES 0x0BU

/* Number of declared LTYPES */
#define PB_LTYPES_COUNT 0x0CU
#define PB_LTYPE_MASK 0x0FU

/**** Field repetition rules ****/

#define PB_HTYPE_REQUIRED 0x00U
#define PB_HTYPE_OPTIONAL 0x10U
#define PB_HTYPE_SINGULAR 0x10U
#define PB_HTYPE_REPEATED 0x20U
#define PB_HTYPE_FIXARRAY 0x20U
#define PB_HTYPE_ONEOF 0x30U
#define PB_HTYPE_MASK 0x30U

/**** Field allocation types ****/

#define PB_ATYPE_STATIC 0x00U
#define PB_ATYPE_POINTER 0x80U
#define PB_ATYPE_CALLBACK 0x40U
#define PB_ATYPE_MASK 0xC0U

#define PB_ATYPE(x) ((x) & PB_ATYPE_MASK)
#define PB_HTYPE(x) ((x) & PB_HTYPE_MASK)
#define PB_LTYPE(x) ((x) & PB_LTYPE_MASK)
#define PB_LTYPE_IS_SUBMSG(x) (PB_LTYPE(x) == PB_LTYPE_SUBMESSAGE || \
PB_LTYPE(x) == PB_LTYPE_SUBMSG_W_CB)

/* Data type used for storing sizes of struct fields
* and array counts.
*/
#if defined(PB_FIELD_32BIT)
typedef uint32_t pb_size_t;
typedef int32_t pb_ssize_t;
#else
typedef uint_least16_t pb_size_t;
typedef int_least16_t pb_ssize_t;
#endif
#define PB_SIZE_MAX ((pb_size_t)-1)

/* Data type for storing encoded data and other byte streams.
* This typedef exists to support platforms where uint8_t does not exist.
* You can regard it as equivalent on uint8_t on other platforms.
*/
typedef uint_least8_t pb_byte_t;

/* Forward declaration of struct types */
typedef struct pb_istream_s pb_istream_t;
typedef struct pb_ostream_s pb_ostream_t;
typedef struct pb_field_iter_s pb_field_iter_t;

/* This structure is used in auto-generated constants
* to specify struct fields.
*/
typedef struct pb_msgdesc_s pb_msgdesc_t;
struct pb_msgdesc_s {
const uint32_t *field_info;
const pb_msgdesc_t * const * submsg_info;
const pb_byte_t *default_value;

bool (*field_callback)(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_iter_t *field);

pb_size_t field_count;
pb_size_t required_field_count;
pb_size_t largest_tag;
};

/* Iterator for message descriptor */
struct pb_field_iter_s {
const pb_msgdesc_t *descriptor; /* Pointer to message descriptor constant */
void *message; /* Pointer to start of the structure */

pb_size_t index; /* Index of the field */
pb_size_t field_info_index; /* Index to descriptor->field_info array */
pb_size_t required_field_index; /* Index that counts only the required fields */
pb_size_t submessage_index; /* Index that counts only submessages */

pb_size_t tag; /* Tag of current field */
pb_size_t data_size; /* sizeof() of a single item */
pb_size_t array_size; /* Number of array entries */
pb_type_t type; /* Type of current field */

void *pField; /* Pointer to current field in struct */
void *pData; /* Pointer to current data contents. Different than pField for arrays and pointers. */
void *pSize; /* Pointer to count/has field */

const pb_msgdesc_t *submsg_desc; /* For submessage fields, pointer to field descriptor for the submessage. */
};

/* For compatibility with legacy code */
typedef pb_field_iter_t pb_field_t;

/* Make sure that the standard integer types are of the expected sizes.
* Otherwise fixed32/fixed64 fields can break.
*
* If you get errors here, it probably means that your stdint.h is not
* correct for your platform.
*/
#ifndef PB_WITHOUT_64BIT
PB_STATIC_ASSERT(sizeof(int64_t) == 2 * sizeof(int32_t), INT64_T_WRONG_SIZE)
PB_STATIC_ASSERT(sizeof(uint64_t) == 2 * sizeof(uint32_t), UINT64_T_WRONG_SIZE)
#endif

/* This structure is used for 'bytes' arrays.
* It has the number of bytes in the beginning, and after that an array.
* Note that actual structs used will have a different length of bytes array.
*/
#define PB_BYTES_ARRAY_T(n) struct { pb_size_t size; pb_byte_t bytes[n]; }
#define PB_BYTES_ARRAY_T_ALLOCSIZE(n) ((size_t)n + offsetof(pb_bytes_array_t, bytes))

struct pb_bytes_array_s {
pb_size_t size;
pb_byte_t bytes[1];
};
typedef struct pb_bytes_array_s pb_bytes_array_t;

/* This structure is used for giving the callback function.
* It is stored in the message structure and filled in by the method that
* calls pb_decode.
*
* The decoding callback will be given a limited-length stream
* If the wire type was string, the length is the length of the string.
* If the wire type was a varint/fixed32/fixed64, the length is the length
* of the actual value.
* The function may be called multiple times (especially for repeated types,
* but also otherwise if the message happens to contain the field multiple
* times.)
*
* The encoding callback will receive the actual output stream.
* It should write all the data in one call, including the field tag and
* wire type. It can write multiple fields.
*
* The callback can be null if you want to skip a field.
*/
typedef struct pb_callback_s pb_callback_t;
struct pb_callback_s {
/* Callback functions receive a pointer to the arg field.
* You can access the value of the field as *arg, and modify it if needed.
*/
union {
bool (*decode)(pb_istream_t *stream, const pb_field_t *field, void **arg);
bool (*encode)(pb_ostream_t *stream, const pb_field_t *field, void * const *arg);
} funcs;

/* Free arg for use by callback */
void *arg;
};

extern bool pb_default_field_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field);
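The comment block above spells out the nanopb callback contract: a decode callback receives a substream limited to the field's length, and for the string wire type bytes_left equals the string length. A minimal, hypothetical decode callback along those lines; the 64-byte buffer assumption is the example's, not the library's:

#include <pb_decode.h> /* nanopb decode API; declares pb_read() */

/* Drains a length-limited string field into a caller-supplied buffer passed via *arg.
* Assumption: *arg points at a buffer of at least 64 bytes. */
static bool read_string_cb(pb_istream_t *stream, const pb_field_t *field, void **arg)
{
    char *buf = (char*)*arg;
    size_t len = stream->bytes_left; /* for the string wire type, this is the string length */
    PB_UNUSED(field);
    if (len > 63) return false; /* reject oversized input */
    if (!pb_read(stream, (pb_byte_t*)buf, len)) return false;
    buf[len] = '\0';
    return true;
}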
/* Wire types. Library user needs these only in encoder callbacks. */
typedef enum {
PB_WT_VARINT = 0,
PB_WT_64BIT = 1,
PB_WT_STRING = 2,
PB_WT_32BIT = 5,
PB_WT_PACKED = 255 /* PB_WT_PACKED is internal marker for packed arrays. */
} pb_wire_type_t;

/* Structure for defining the handling of unknown/extension fields.
* Usually the pb_extension_type_t structure is automatically generated,
* while the pb_extension_t structure is created by the user. However,
* if you want to catch all unknown fields, you can also create a custom
* pb_extension_type_t with your own callback.
*/
typedef struct pb_extension_type_s pb_extension_type_t;
typedef struct pb_extension_s pb_extension_t;
struct pb_extension_type_s {
/* Called for each unknown field in the message.
* If you handle the field, read off all of its data and return true.
* If you do not handle the field, do not read anything and return true.
* If you run into an error, return false.
* Set to NULL for default handler.
*/
bool (*decode)(pb_istream_t *stream, pb_extension_t *extension,
uint32_t tag, pb_wire_type_t wire_type);

/* Called once after all regular fields have been encoded.
* If you have something to write, do so and return true.
* If you do not have anything to write, just return true.
* If you run into an error, return false.
* Set to NULL for default handler.
*/
bool (*encode)(pb_ostream_t *stream, const pb_extension_t *extension);

/* Free field for use by the callback. */
const void *arg;
};

struct pb_extension_s {
/* Type describing the extension field. Usually you'll initialize
* this to a pointer to the automatically generated structure. */
const pb_extension_type_t *type;

/* Destination for the decoded data. This must match the datatype
* of the extension field. */
void *dest;

/* Pointer to the next extension handler, or NULL.
* If this extension does not match a field, the next handler is
* automatically called. */
pb_extension_t *next;

/* The decoder sets this to true if the extension was found.
* Ignored for encoding. */
bool found;
};

#define pb_extension_init_zero {NULL,NULL,NULL,false}

/* Memory allocation functions to use. You can define pb_realloc and
* pb_free to custom functions if you want. */
#ifdef PB_ENABLE_MALLOC
# ifndef pb_realloc
# define pb_realloc(ptr, size) realloc(ptr, size)
# endif
# ifndef pb_free
# define pb_free(ptr) free(ptr)
# endif
#endif

/* This is used to inform about need to regenerate .pb.h/.pb.c files. */
#define PB_PROTO_HEADER_VERSION 40

/* These macros are used to declare pb_field_t's in the constant array. */
/* Size of a structure member, in bytes. */
#define pb_membersize(st, m) (sizeof ((st*)0)->m)
/* Number of entries in an array. */
#define pb_arraysize(st, m) (pb_membersize(st, m) / pb_membersize(st, m[0]))
/* Delta from start of one member to the start of another member. */
#define pb_delta(st, m1, m2) ((int)offsetof(st, m1) - (int)offsetof(st, m2))

/* Force expansion of macro value */
#define PB_EXPAND(x) x

/* Binding of a message field set into a specific structure */
#define PB_BIND(msgname, structname, width) \
const uint32_t structname ## _field_info[] PB_PROGMEM = \
{ \
msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ ## width, structname) \
0 \
}; \
const pb_msgdesc_t* const structname ## _submsg_info[] = \
{ \
msgname ## _FIELDLIST(PB_GEN_SUBMSG_INFO, structname) \
NULL \
}; \
const pb_msgdesc_t structname ## _msg = \
{ \
structname ## _field_info, \
structname ## _submsg_info, \
msgname ## _DEFAULT, \
msgname ## _CALLBACK, \
0 msgname ## _FIELDLIST(PB_GEN_FIELD_COUNT, structname), \
0 msgname ## _FIELDLIST(PB_GEN_REQ_FIELD_COUNT, structname), \
0 msgname ## _FIELDLIST(PB_GEN_LARGEST_TAG, structname), \
}; \
msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ASSERT_ ## width, structname)

#define PB_GEN_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) +1
#define PB_GEN_REQ_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) \
+ (PB_HTYPE_ ## htype == PB_HTYPE_REQUIRED)
#define PB_GEN_LARGEST_TAG(structname, atype, htype, ltype, fieldname, tag) \
* 0 + tag

/* X-macro for generating the entries in struct_field_info[] array. */
#define PB_GEN_FIELD_INFO_1(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_2(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_4(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_8(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_AUTO(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \
tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_FIELDINFO_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \
PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size)

#define PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \
PB_FIELDINFO_ ## width(tag, type, data_offset, data_size, size_offset, array_size)

/* X-macro for generating asserts that entries fit in struct_field_info[] array.
* The structure of macros here must match the structure above in PB_GEN_FIELD_INFO_x(),
* but it is not easily reused because of how macro substitutions work. */
#define PB_GEN_FIELD_INFO_ASSERT_1(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_ASSERT_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_ASSERT_2(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_ASSERT_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_ASSERT_4(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_ASSERT_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_ASSERT_8(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_ASSERT_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_GEN_FIELD_INFO_ASSERT_AUTO(structname, atype, htype, ltype, fieldname, tag) \
PB_FIELDINFO_ASSERT_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \
tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))

#define PB_FIELDINFO_ASSERT_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \
PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size)

#define PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \
PB_FIELDINFO_ASSERT_ ## width(tag, type, data_offset, data_size, size_offset, array_size)

#define PB_DATA_OFFSET_STATIC(htype, structname, fieldname) PB_DO ## htype(structname, fieldname)
#define PB_DATA_OFFSET_POINTER(htype, structname, fieldname) PB_DO ## htype(structname, fieldname)
#define PB_DATA_OFFSET_CALLBACK(htype, structname, fieldname) PB_DO ## htype(structname, fieldname)
#define PB_DO_PB_HTYPE_REQUIRED(structname, fieldname) offsetof(structname, fieldname)
#define PB_DO_PB_HTYPE_SINGULAR(structname, fieldname) offsetof(structname, fieldname)
#define PB_DO_PB_HTYPE_ONEOF(structname, fieldname) offsetof(structname, PB_ONEOF_NAME(FULL, fieldname))
#define PB_DO_PB_HTYPE_OPTIONAL(structname, fieldname) offsetof(structname, fieldname)
#define PB_DO_PB_HTYPE_REPEATED(structname, fieldname) offsetof(structname, fieldname)
#define PB_DO_PB_HTYPE_FIXARRAY(structname, fieldname) offsetof(structname, fieldname)

#define PB_SIZE_OFFSET_STATIC(htype, structname, fieldname) PB_SO ## htype(structname, fieldname)
#define PB_SIZE_OFFSET_POINTER(htype, structname, fieldname) PB_SO_PTR ## htype(structname, fieldname)
#define PB_SIZE_OFFSET_CALLBACK(htype, structname, fieldname) PB_SO_CB ## htype(structname, fieldname)
#define PB_SO_PB_HTYPE_REQUIRED(structname, fieldname) 0
#define PB_SO_PB_HTYPE_SINGULAR(structname, fieldname) 0
#define PB_SO_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF2(structname, PB_ONEOF_NAME(FULL, fieldname), PB_ONEOF_NAME(UNION, fieldname))
#define PB_SO_PB_HTYPE_ONEOF2(structname, fullname, unionname) PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname)
#define PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname) pb_delta(structname, fullname, which_ ## unionname)
#define PB_SO_PB_HTYPE_OPTIONAL(structname, fieldname) pb_delta(structname, fieldname, has_ ## fieldname)
#define PB_SO_PB_HTYPE_REPEATED(structname, fieldname) pb_delta(structname, fieldname, fieldname ## _count)
#define PB_SO_PB_HTYPE_FIXARRAY(structname, fieldname) 0
#define PB_SO_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 0
#define PB_SO_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 0
#define PB_SO_PTR_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname)
#define PB_SO_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 0
#define PB_SO_PTR_PB_HTYPE_REPEATED(structname, fieldname) PB_SO_PB_HTYPE_REPEATED(structname, fieldname)
#define PB_SO_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_REQUIRED(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_SINGULAR(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname)
#define PB_SO_CB_PB_HTYPE_OPTIONAL(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_REPEATED(structname, fieldname) 0
#define PB_SO_CB_PB_HTYPE_FIXARRAY(structname, fieldname) 0

#define PB_ARRAY_SIZE_STATIC(htype, structname, fieldname) PB_AS ## htype(structname, fieldname)
#define PB_ARRAY_SIZE_POINTER(htype, structname, fieldname) PB_AS_PTR ## htype(structname, fieldname)
#define PB_ARRAY_SIZE_CALLBACK(htype, structname, fieldname) 1
#define PB_AS_PB_HTYPE_REQUIRED(structname, fieldname) 1
#define PB_AS_PB_HTYPE_SINGULAR(structname, fieldname) 1
#define PB_AS_PB_HTYPE_OPTIONAL(structname, fieldname) 1
#define PB_AS_PB_HTYPE_ONEOF(structname, fieldname) 1
#define PB_AS_PB_HTYPE_REPEATED(structname, fieldname) pb_arraysize(structname, fieldname)
#define PB_AS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname)
#define PB_AS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_ONEOF(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_REPEATED(structname, fieldname) 1
#define PB_AS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname[0])

#define PB_DATA_SIZE_STATIC(htype, structname, fieldname) PB_DS ## htype(structname, fieldname)
#define PB_DATA_SIZE_POINTER(htype, structname, fieldname) PB_DS_PTR ## htype(structname, fieldname)
#define PB_DATA_SIZE_CALLBACK(htype, structname, fieldname) PB_DS_CB ## htype(structname, fieldname)
#define PB_DS_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname))
#define PB_DS_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname)[0])
#define PB_DS_PTR_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0])
#define PB_DS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0][0])
#define PB_DS_CB_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_CB_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_CB_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_CB_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname))
#define PB_DS_CB_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname)
#define PB_DS_CB_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname)

#define PB_ONEOF_NAME(type, tuple) PB_EXPAND(PB_ONEOF_NAME_ ## type tuple)
#define PB_ONEOF_NAME_UNION(unionname,membername,fullname) unionname
#define PB_ONEOF_NAME_MEMBER(unionname,membername,fullname) membername
#define PB_ONEOF_NAME_FULL(unionname,membername,fullname) fullname

#define PB_GEN_SUBMSG_INFO(structname, atype, htype, ltype, fieldname, tag) \
|
||||
PB_SUBMSG_INFO_ ## htype(_PB_LTYPE_ ## ltype, structname, fieldname)
|
||||
|
||||
#define PB_SUBMSG_INFO_REQUIRED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
|
||||
#define PB_SUBMSG_INFO_SINGULAR(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
|
||||
#define PB_SUBMSG_INFO_OPTIONAL(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
|
||||
#define PB_SUBMSG_INFO_ONEOF(ltype, structname, fieldname) PB_SUBMSG_INFO_ONEOF2(ltype, structname, PB_ONEOF_NAME(UNION, fieldname), PB_ONEOF_NAME(MEMBER, fieldname))
|
||||
#define PB_SUBMSG_INFO_ONEOF2(ltype, structname, unionname, membername) PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername)
|
||||
#define PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername) PB_SI ## ltype(structname ## _ ## unionname ## _ ## membername ## _MSGTYPE)
|
||||
#define PB_SUBMSG_INFO_REPEATED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
|
||||
#define PB_SUBMSG_INFO_FIXARRAY(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
|
||||
#define PB_SI_PB_LTYPE_BOOL(t)
|
||||
#define PB_SI_PB_LTYPE_BYTES(t)
|
||||
#define PB_SI_PB_LTYPE_DOUBLE(t)
|
||||
#define PB_SI_PB_LTYPE_ENUM(t)
|
||||
#define PB_SI_PB_LTYPE_UENUM(t)
|
||||
#define PB_SI_PB_LTYPE_FIXED32(t)
|
||||
#define PB_SI_PB_LTYPE_FIXED64(t)
|
||||
#define PB_SI_PB_LTYPE_FLOAT(t)
|
||||
#define PB_SI_PB_LTYPE_INT32(t)
|
||||
#define PB_SI_PB_LTYPE_INT64(t)
|
||||
#define PB_SI_PB_LTYPE_MESSAGE(t) PB_SUBMSG_DESCRIPTOR(t)
|
||||
#define PB_SI_PB_LTYPE_MSG_W_CB(t) PB_SUBMSG_DESCRIPTOR(t)
|
||||
#define PB_SI_PB_LTYPE_SFIXED32(t)
|
||||
#define PB_SI_PB_LTYPE_SFIXED64(t)
|
||||
#define PB_SI_PB_LTYPE_SINT32(t)
|
||||
#define PB_SI_PB_LTYPE_SINT64(t)
|
||||
#define PB_SI_PB_LTYPE_STRING(t)
|
||||
#define PB_SI_PB_LTYPE_UINT32(t)
|
||||
#define PB_SI_PB_LTYPE_UINT64(t)
|
||||
#define PB_SI_PB_LTYPE_EXTENSION(t)
|
||||
#define PB_SI_PB_LTYPE_FIXED_LENGTH_BYTES(t)
|
||||
#define PB_SUBMSG_DESCRIPTOR(t) &(t ## _msg),
|
||||
|
||||
/* The field descriptors use a variable width format, with width of either
|
||||
* 1, 2, 4 or 8 of 32-bit words. The two lowest bytes of the first byte always
|
||||
* encode the descriptor size, 6 lowest bits of field tag number, and 8 bits
|
||||
* of the field type.
|
||||
*
|
||||
* Descriptor size is encoded as 0 = 1 word, 1 = 2 words, 2 = 4 words, 3 = 8 words.
|
||||
*
|
||||
* Formats, listed starting with the least significant bit of the first word.
|
||||
* 1 word: [2-bit len] [6-bit tag] [8-bit type] [8-bit data_offset] [4-bit size_offset] [4-bit data_size]
|
||||
*
|
||||
* 2 words: [2-bit len] [6-bit tag] [8-bit type] [12-bit array_size] [4-bit size_offset]
|
||||
* [16-bit data_offset] [12-bit data_size] [4-bit tag>>6]
|
||||
*
|
||||
* 4 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit array_size]
|
||||
* [8-bit size_offset] [24-bit tag>>6]
|
||||
* [32-bit data_offset]
|
||||
* [32-bit data_size]
|
||||
*
|
||||
* 8 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit reserved]
|
||||
* [8-bit size_offset] [24-bit tag>>6]
|
||||
* [32-bit data_offset]
|
||||
* [32-bit data_size]
|
||||
* [32-bit array_size]
|
||||
* [32-bit reserved]
|
||||
* [32-bit reserved]
|
||||
* [32-bit reserved]
|
||||
*/
|
||||
|
||||
#define PB_FIELDINFO_1(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
(0 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(data_offset) & 0xFF) << 16) | \
|
||||
(((uint32_t)(size_offset) & 0x0F) << 24) | (((uint32_t)(data_size) & 0x0F) << 28)),
|
||||
|
||||
#define PB_FIELDINFO_2(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
(1 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFF) << 16) | (((uint32_t)(size_offset) & 0x0F) << 28)), \
|
||||
(((uint32_t)(data_offset) & 0xFFFF) | (((uint32_t)(data_size) & 0xFFF) << 16) | (((uint32_t)(tag) & 0x3c0) << 22)),
|
||||
|
||||
#define PB_FIELDINFO_4(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
(2 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFFF) << 16)), \
|
||||
((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
|
||||
(data_offset), (data_size),
|
||||
|
||||
#define PB_FIELDINFO_8(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
(3 | (((tag) << 2) & 0xFF) | ((type) << 8)), \
|
||||
((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
|
||||
(data_offset), (data_size), (array_size), 0, 0, 0,
|
||||
|
||||
/* These assertions verify that the field information fits in the allocated space.
|
||||
* The generator tries to automatically determine the correct width that can fit all
|
||||
* data associated with a message. These asserts will fail only if there has been a
|
||||
* problem in the automatic logic - this may be worth reporting as a bug. As a workaround,
|
||||
* you can increase the descriptor width by defining PB_FIELDINFO_WIDTH or by setting
|
||||
* descriptorsize option in .options file.
|
||||
*/
|
||||
#define PB_FITS(value,bits) ((uint32_t)(value) < ((uint32_t)1<<bits))
|
||||
#define PB_FIELDINFO_ASSERT_1(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
PB_STATIC_ASSERT(PB_FITS(tag,6) && PB_FITS(data_offset,8) && PB_FITS(size_offset,4) && PB_FITS(data_size,4) && PB_FITS(array_size,1), FIELDINFO_DOES_NOT_FIT_width1_field ## tag)
|
||||
|
||||
#define PB_FIELDINFO_ASSERT_2(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
PB_STATIC_ASSERT(PB_FITS(tag,10) && PB_FITS(data_offset,16) && PB_FITS(size_offset,4) && PB_FITS(data_size,12) && PB_FITS(array_size,12), FIELDINFO_DOES_NOT_FIT_width2_field ## tag)
|
||||
|
||||
#ifndef PB_FIELD_32BIT
|
||||
/* Maximum field sizes are still 16-bit if pb_size_t is 16-bit */
|
||||
#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS((int_least8_t)size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag)
|
||||
|
||||
#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS((int_least8_t)size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width8_field ## tag)
|
||||
#else
|
||||
/* Up to 32-bit fields supported.
|
||||
* Note that the checks are against 31 bits to avoid compiler warnings about shift wider than type in the test.
|
||||
* I expect that there is no reasonable use for >2GB messages with nanopb anyway.
|
||||
*/
|
||||
#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag)
|
||||
|
||||
#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \
|
||||
PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,31), FIELDINFO_DOES_NOT_FIT_width8_field ## tag)
|
||||
#endif
|
||||
|
||||
|
||||
/* Automatic picking of FIELDINFO width:
|
||||
* Uses width 1 when possible, otherwise resorts to width 2.
|
||||
* This is used when PB_BIND() is called with "AUTO" as the argument.
|
||||
* The generator will give explicit size argument when it knows that a message
|
||||
* structure grows beyond 1-word format limits.
|
||||
*/
|
||||
#define PB_FIELDINFO_WIDTH_AUTO(atype, htype, ltype) PB_FI_WIDTH ## atype(htype, ltype)
|
||||
#define PB_FI_WIDTH_PB_ATYPE_STATIC(htype, ltype) PB_FI_WIDTH ## htype(ltype)
|
||||
#define PB_FI_WIDTH_PB_ATYPE_POINTER(htype, ltype) PB_FI_WIDTH ## htype(ltype)
|
||||
#define PB_FI_WIDTH_PB_ATYPE_CALLBACK(htype, ltype) 2
|
||||
#define PB_FI_WIDTH_PB_HTYPE_REQUIRED(ltype) PB_FI_WIDTH ## ltype
|
||||
#define PB_FI_WIDTH_PB_HTYPE_SINGULAR(ltype) PB_FI_WIDTH ## ltype
|
||||
#define PB_FI_WIDTH_PB_HTYPE_OPTIONAL(ltype) PB_FI_WIDTH ## ltype
|
||||
#define PB_FI_WIDTH_PB_HTYPE_ONEOF(ltype) PB_FI_WIDTH ## ltype
|
||||
#define PB_FI_WIDTH_PB_HTYPE_REPEATED(ltype) 2
|
||||
#define PB_FI_WIDTH_PB_HTYPE_FIXARRAY(ltype) 2
|
||||
#define PB_FI_WIDTH_PB_LTYPE_BOOL 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_BYTES 2
|
||||
#define PB_FI_WIDTH_PB_LTYPE_DOUBLE 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_ENUM 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_UENUM 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_FIXED32 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_FIXED64 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_FLOAT 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_INT32 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_INT64 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_MESSAGE 2
|
||||
#define PB_FI_WIDTH_PB_LTYPE_MSG_W_CB 2
|
||||
#define PB_FI_WIDTH_PB_LTYPE_SFIXED32 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_SFIXED64 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_SINT32 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_SINT64 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_STRING 2
|
||||
#define PB_FI_WIDTH_PB_LTYPE_UINT32 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_UINT64 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_EXTENSION 1
|
||||
#define PB_FI_WIDTH_PB_LTYPE_FIXED_LENGTH_BYTES 2
|
||||
|
||||
/* The mapping from protobuf types to LTYPEs is done using these macros. */
|
||||
#define PB_LTYPE_MAP_BOOL PB_LTYPE_BOOL
|
||||
#define PB_LTYPE_MAP_BYTES PB_LTYPE_BYTES
|
||||
#define PB_LTYPE_MAP_DOUBLE PB_LTYPE_FIXED64
|
||||
#define PB_LTYPE_MAP_ENUM PB_LTYPE_VARINT
|
||||
#define PB_LTYPE_MAP_UENUM PB_LTYPE_UVARINT
|
||||
#define PB_LTYPE_MAP_FIXED32 PB_LTYPE_FIXED32
|
||||
#define PB_LTYPE_MAP_FIXED64 PB_LTYPE_FIXED64
|
||||
#define PB_LTYPE_MAP_FLOAT PB_LTYPE_FIXED32
|
||||
#define PB_LTYPE_MAP_INT32 PB_LTYPE_VARINT
|
||||
#define PB_LTYPE_MAP_INT64 PB_LTYPE_VARINT
|
||||
#define PB_LTYPE_MAP_MESSAGE PB_LTYPE_SUBMESSAGE
|
||||
#define PB_LTYPE_MAP_MSG_W_CB PB_LTYPE_SUBMSG_W_CB
|
||||
#define PB_LTYPE_MAP_SFIXED32 PB_LTYPE_FIXED32
|
||||
#define PB_LTYPE_MAP_SFIXED64 PB_LTYPE_FIXED64
|
||||
#define PB_LTYPE_MAP_SINT32 PB_LTYPE_SVARINT
|
||||
#define PB_LTYPE_MAP_SINT64 PB_LTYPE_SVARINT
|
||||
#define PB_LTYPE_MAP_STRING PB_LTYPE_STRING
|
||||
#define PB_LTYPE_MAP_UINT32 PB_LTYPE_UVARINT
|
||||
#define PB_LTYPE_MAP_UINT64 PB_LTYPE_UVARINT
|
||||
#define PB_LTYPE_MAP_EXTENSION PB_LTYPE_EXTENSION
|
||||
#define PB_LTYPE_MAP_FIXED_LENGTH_BYTES PB_LTYPE_FIXED_LENGTH_BYTES
|
||||
|
||||
/* These macros are used for giving out error messages.
|
||||
* They are mostly a debugging aid; the main error information
|
||||
* is the true/false return value from functions.
|
||||
* Some code space can be saved by disabling the error
|
||||
* messages if not used.
|
||||
*
|
||||
* PB_SET_ERROR() sets the error message if none has been set yet.
|
||||
* msg must be a constant string literal.
|
||||
* PB_GET_ERROR() always returns a pointer to a string.
|
||||
* PB_RETURN_ERROR() sets the error and returns false from current
|
||||
* function.
|
||||
*/
|
||||
#ifdef PB_NO_ERRMSG
|
||||
#define PB_SET_ERROR(stream, msg) PB_UNUSED(stream)
|
||||
#define PB_GET_ERROR(stream) "(errmsg disabled)"
|
||||
#else
|
||||
#define PB_SET_ERROR(stream, msg) (stream->errmsg = (stream)->errmsg ? (stream)->errmsg : (msg))
|
||||
#define PB_GET_ERROR(stream) ((stream)->errmsg ? (stream)->errmsg : "(none)")
|
||||
#endif
|
||||
|
||||
#define PB_RETURN_ERROR(stream, msg) return PB_SET_ERROR(stream, msg), false
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
#if __cplusplus >= 201103L
|
||||
#define PB_CONSTEXPR constexpr
|
||||
#else // __cplusplus >= 201103L
|
||||
#define PB_CONSTEXPR
|
||||
#endif // __cplusplus >= 201103L
|
||||
|
||||
#if __cplusplus >= 201703L
|
||||
#define PB_INLINE_CONSTEXPR inline constexpr
|
||||
#else // __cplusplus >= 201703L
|
||||
#define PB_INLINE_CONSTEXPR PB_CONSTEXPR
|
||||
#endif // __cplusplus >= 201703L
|
||||
|
||||
extern "C++"
|
||||
{
|
||||
namespace nanopb {
|
||||
// Each type will be partially specialized by the generator.
|
||||
template <typename GenMessageT> struct MessageDescriptor;
|
||||
} // namespace nanopb
|
||||
}
|
||||
#endif /* __cplusplus */
|
||||
|
||||
#endif
|
||||
|
|
@ -1,388 +0,0 @@
|
|||
/* pb_common.c: Common support functions for pb_encode.c and pb_decode.c.
|
||||
*
|
||||
* 2014 Petteri Aimonen <jpa@kapsi.fi>
|
||||
*/
|
||||
|
||||
#include "nanopb/pb_common.h"
|
||||
|
||||
static bool load_descriptor_values(pb_field_iter_t *iter)
|
||||
{
|
||||
uint32_t word0;
|
||||
uint32_t data_offset;
|
||||
int_least8_t size_offset;
|
||||
|
||||
if (iter->index >= iter->descriptor->field_count)
|
||||
return false;
|
||||
|
||||
word0 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
|
||||
iter->type = (pb_type_t)((word0 >> 8) & 0xFF);
|
||||
|
||||
switch(word0 & 3)
|
||||
{
|
||||
case 0: {
|
||||
/* 1-word format */
|
||||
iter->array_size = 1;
|
||||
iter->tag = (pb_size_t)((word0 >> 2) & 0x3F);
|
||||
size_offset = (int_least8_t)((word0 >> 24) & 0x0F);
|
||||
data_offset = (word0 >> 16) & 0xFF;
|
||||
iter->data_size = (pb_size_t)((word0 >> 28) & 0x0F);
|
||||
break;
|
||||
}
|
||||
|
||||
case 1: {
|
||||
/* 2-word format */
|
||||
uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
|
||||
|
||||
iter->array_size = (pb_size_t)((word0 >> 16) & 0x0FFF);
|
||||
iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 28) << 6));
|
||||
size_offset = (int_least8_t)((word0 >> 28) & 0x0F);
|
||||
data_offset = word1 & 0xFFFF;
|
||||
iter->data_size = (pb_size_t)((word1 >> 16) & 0x0FFF);
|
||||
break;
|
||||
}
|
||||
|
||||
case 2: {
|
||||
/* 4-word format */
|
||||
uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
|
||||
uint32_t word2 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]);
|
||||
uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]);
|
||||
|
||||
iter->array_size = (pb_size_t)(word0 >> 16);
|
||||
iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6));
|
||||
size_offset = (int_least8_t)(word1 & 0xFF);
|
||||
data_offset = word2;
|
||||
iter->data_size = (pb_size_t)word3;
|
||||
break;
|
||||
}
|
||||
|
||||
default: {
|
||||
/* 8-word format */
|
||||
uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
|
||||
uint32_t word2 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]);
|
||||
uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]);
|
||||
uint32_t word4 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 4]);
|
||||
|
||||
iter->array_size = (pb_size_t)word4;
|
||||
iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6));
|
||||
size_offset = (int_least8_t)(word1 & 0xFF);
|
||||
data_offset = word2;
|
||||
iter->data_size = (pb_size_t)word3;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!iter->message)
|
||||
{
|
||||
/* Avoid doing arithmetic on null pointers, it is undefined */
|
||||
iter->pField = NULL;
|
||||
iter->pSize = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
iter->pField = (char*)iter->message + data_offset;
|
||||
|
||||
if (size_offset)
|
||||
{
|
||||
iter->pSize = (char*)iter->pField - size_offset;
|
||||
}
|
||||
else if (PB_HTYPE(iter->type) == PB_HTYPE_REPEATED &&
|
||||
(PB_ATYPE(iter->type) == PB_ATYPE_STATIC ||
|
||||
PB_ATYPE(iter->type) == PB_ATYPE_POINTER))
|
||||
{
|
||||
/* Fixed count array */
|
||||
iter->pSize = &iter->array_size;
|
||||
}
|
||||
else
|
||||
{
|
||||
iter->pSize = NULL;
|
||||
}
|
||||
|
||||
if (PB_ATYPE(iter->type) == PB_ATYPE_POINTER && iter->pField != NULL)
|
||||
{
|
||||
iter->pData = *(void**)iter->pField;
|
||||
}
|
||||
else
|
||||
{
|
||||
iter->pData = iter->pField;
|
||||
}
|
||||
}
|
||||
|
||||
if (PB_LTYPE_IS_SUBMSG(iter->type))
|
||||
{
|
||||
iter->submsg_desc = iter->descriptor->submsg_info[iter->submessage_index];
|
||||
}
|
||||
else
|
||||
{
|
||||
iter->submsg_desc = NULL;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void advance_iterator(pb_field_iter_t *iter)
|
||||
{
|
||||
iter->index++;
|
||||
|
||||
if (iter->index >= iter->descriptor->field_count)
|
||||
{
|
||||
/* Restart */
|
||||
iter->index = 0;
|
||||
iter->field_info_index = 0;
|
||||
iter->submessage_index = 0;
|
||||
iter->required_field_index = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Increment indexes based on previous field type.
|
||||
* All field info formats have the following fields:
|
||||
* - lowest 2 bits tell the amount of words in the descriptor (2^n words)
|
||||
* - bits 2..7 give the lowest bits of tag number.
|
||||
* - bits 8..15 give the field type.
|
||||
*/
|
||||
uint32_t prev_descriptor = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
|
||||
pb_type_t prev_type = (prev_descriptor >> 8) & 0xFF;
|
||||
pb_size_t descriptor_len = (pb_size_t)(1 << (prev_descriptor & 3));
|
||||
|
||||
/* Add to fields.
|
||||
* The cast to pb_size_t is needed to avoid -Wconversion warning.
|
||||
* Because the data is is constants from generator, there is no danger of overflow.
|
||||
*/
|
||||
iter->field_info_index = (pb_size_t)(iter->field_info_index + descriptor_len);
|
||||
iter->required_field_index = (pb_size_t)(iter->required_field_index + (PB_HTYPE(prev_type) == PB_HTYPE_REQUIRED));
|
||||
iter->submessage_index = (pb_size_t)(iter->submessage_index + PB_LTYPE_IS_SUBMSG(prev_type));
|
||||
}
|
||||
}
|
||||
|
||||
bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *message)
|
||||
{
|
||||
memset(iter, 0, sizeof(*iter));
|
||||
|
||||
iter->descriptor = desc;
|
||||
iter->message = message;
|
||||
|
||||
return load_descriptor_values(iter);
|
||||
}
|
||||
|
||||
bool pb_field_iter_begin_extension(pb_field_iter_t *iter, pb_extension_t *extension)
|
||||
{
|
||||
const pb_msgdesc_t *msg = (const pb_msgdesc_t*)extension->type->arg;
|
||||
bool status;
|
||||
|
||||
uint32_t word0 = PB_PROGMEM_READU32(msg->field_info[0]);
|
||||
if (PB_ATYPE(word0 >> 8) == PB_ATYPE_POINTER)
|
||||
{
|
||||
/* For pointer extensions, the pointer is stored directly
|
||||
* in the extension structure. This avoids having an extra
|
||||
* indirection. */
|
||||
status = pb_field_iter_begin(iter, msg, &extension->dest);
|
||||
}
|
||||
else
|
||||
{
|
||||
status = pb_field_iter_begin(iter, msg, extension->dest);
|
||||
}
|
||||
|
||||
iter->pSize = &extension->found;
|
||||
return status;
|
||||
}
|
||||
|
||||
bool pb_field_iter_next(pb_field_iter_t *iter)
|
||||
{
|
||||
advance_iterator(iter);
|
||||
(void)load_descriptor_values(iter);
|
||||
return iter->index != 0;
|
||||
}
|
||||
|
||||
bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag)
|
||||
{
|
||||
if (iter->tag == tag)
|
||||
{
|
||||
return true; /* Nothing to do, correct field already. */
|
||||
}
|
||||
else if (tag > iter->descriptor->largest_tag)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
pb_size_t start = iter->index;
|
||||
uint32_t fieldinfo;
|
||||
|
||||
if (tag < iter->tag)
|
||||
{
|
||||
/* Fields are in tag number order, so we know that tag is between
|
||||
* 0 and our start position. Setting index to end forces
|
||||
* advance_iterator() call below to restart from beginning. */
|
||||
iter->index = iter->descriptor->field_count;
|
||||
}
|
||||
|
||||
do
|
||||
{
|
||||
/* Advance iterator but don't load values yet */
|
||||
advance_iterator(iter);
|
||||
|
||||
/* Do fast check for tag number match */
|
||||
fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
|
||||
|
||||
if (((fieldinfo >> 2) & 0x3F) == (tag & 0x3F))
|
||||
{
|
||||
/* Good candidate, check further */
|
||||
(void)load_descriptor_values(iter);
|
||||
|
||||
if (iter->tag == tag &&
|
||||
PB_LTYPE(iter->type) != PB_LTYPE_EXTENSION)
|
||||
{
|
||||
/* Found it */
|
||||
return true;
|
||||
}
|
||||
}
|
||||
} while (iter->index != start);
|
||||
|
||||
/* Searched all the way back to start, and found nothing. */
|
||||
(void)load_descriptor_values(iter);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool pb_field_iter_find_extension(pb_field_iter_t *iter)
|
||||
{
|
||||
if (PB_LTYPE(iter->type) == PB_LTYPE_EXTENSION)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
pb_size_t start = iter->index;
|
||||
uint32_t fieldinfo;
|
||||
|
||||
do
|
||||
{
|
||||
/* Advance iterator but don't load values yet */
|
||||
advance_iterator(iter);
|
||||
|
||||
/* Do fast check for field type */
|
||||
fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
|
||||
|
||||
if (PB_LTYPE((fieldinfo >> 8) & 0xFF) == PB_LTYPE_EXTENSION)
|
||||
{
|
||||
return load_descriptor_values(iter);
|
||||
}
|
||||
} while (iter->index != start);
|
||||
|
||||
/* Searched all the way back to start, and found nothing. */
|
||||
(void)load_descriptor_values(iter);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static void *pb_const_cast(const void *p)
|
||||
{
|
||||
/* Note: this casts away const, in order to use the common field iterator
|
||||
* logic for both encoding and decoding. The cast is done using union
|
||||
* to avoid spurious compiler warnings. */
|
||||
union {
|
||||
void *p1;
|
||||
const void *p2;
|
||||
} t;
|
||||
t.p2 = p;
|
||||
return t.p1;
|
||||
}
|
||||
|
||||
bool pb_field_iter_begin_const(pb_field_iter_t *iter, const pb_msgdesc_t *desc, const void *message)
|
||||
{
|
||||
return pb_field_iter_begin(iter, desc, pb_const_cast(message));
|
||||
}
|
||||
|
||||
bool pb_field_iter_begin_extension_const(pb_field_iter_t *iter, const pb_extension_t *extension)
|
||||
{
|
||||
return pb_field_iter_begin_extension(iter, (pb_extension_t*)pb_const_cast(extension));
|
||||
}
|
||||
|
||||
bool pb_default_field_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field)
|
||||
{
|
||||
if (field->data_size == sizeof(pb_callback_t))
|
||||
{
|
||||
pb_callback_t *pCallback = (pb_callback_t*)field->pData;
|
||||
|
||||
if (pCallback != NULL)
|
||||
{
|
||||
if (istream != NULL && pCallback->funcs.decode != NULL)
|
||||
{
|
||||
return pCallback->funcs.decode(istream, field, &pCallback->arg);
|
||||
}
|
||||
|
||||
if (ostream != NULL && pCallback->funcs.encode != NULL)
|
||||
{
|
||||
return pCallback->funcs.encode(ostream, field, &pCallback->arg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true; /* Success, but didn't do anything */
|
||||
|
||||
}
|
||||
|
||||
#ifdef PB_VALIDATE_UTF8
|
||||
|
||||
/* This function checks whether a string is valid UTF-8 text.
|
||||
*
|
||||
* Algorithm is adapted from https://www.cl.cam.ac.uk/~mgk25/ucs/utf8_check.c
|
||||
* Original copyright: Markus Kuhn <http://www.cl.cam.ac.uk/~mgk25/> 2005-03-30
|
||||
* Licensed under "Short code license", which allows use under MIT license or
|
||||
* any compatible with it.
|
||||
*/
|
||||
|
||||
bool pb_validate_utf8(const char *str)
|
||||
{
|
||||
const pb_byte_t *s = (const pb_byte_t*)str;
|
||||
while (*s)
|
||||
{
|
||||
if (*s < 0x80)
|
||||
{
|
||||
/* 0xxxxxxx */
|
||||
s++;
|
||||
}
|
||||
else if ((s[0] & 0xe0) == 0xc0)
|
||||
{
|
||||
/* 110XXXXx 10xxxxxx */
|
||||
if ((s[1] & 0xc0) != 0x80 ||
|
||||
(s[0] & 0xfe) == 0xc0) /* overlong? */
|
||||
return false;
|
||||
else
|
||||
s += 2;
|
||||
}
|
||||
else if ((s[0] & 0xf0) == 0xe0)
|
||||
{
|
||||
/* 1110XXXX 10Xxxxxx 10xxxxxx */
|
||||
if ((s[1] & 0xc0) != 0x80 ||
|
||||
(s[2] & 0xc0) != 0x80 ||
|
||||
(s[0] == 0xe0 && (s[1] & 0xe0) == 0x80) || /* overlong? */
|
||||
(s[0] == 0xed && (s[1] & 0xe0) == 0xa0) || /* surrogate? */
|
||||
(s[0] == 0xef && s[1] == 0xbf &&
|
||||
(s[2] & 0xfe) == 0xbe)) /* U+FFFE or U+FFFF? */
|
||||
return false;
|
||||
else
|
||||
s += 3;
|
||||
}
|
||||
else if ((s[0] & 0xf8) == 0xf0)
|
||||
{
|
||||
/* 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx */
|
||||
if ((s[1] & 0xc0) != 0x80 ||
|
||||
(s[2] & 0xc0) != 0x80 ||
|
||||
(s[3] & 0xc0) != 0x80 ||
|
||||
(s[0] == 0xf0 && (s[1] & 0xf0) == 0x80) || /* overlong? */
|
||||
(s[0] == 0xf4 && s[1] > 0x8f) || s[0] > 0xf4) /* > U+10FFFF? */
|
||||
return false;
|
||||
else
|
||||
s += 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
|
@ -1,49 +0,0 @@
|
|||
/* pb_common.h: Common support functions for pb_encode.c and pb_decode.c.
|
||||
* These functions are rarely needed by applications directly.
|
||||
*/
|
||||
|
||||
#ifndef PB_COMMON_H_INCLUDED
|
||||
#define PB_COMMON_H_INCLUDED
|
||||
|
||||
#include "nanopb/pb.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Initialize the field iterator structure to beginning.
|
||||
* Returns false if the message type is empty. */
|
||||
bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *message);
|
||||
|
||||
/* Get a field iterator for extension field. */
|
||||
bool pb_field_iter_begin_extension(pb_field_iter_t *iter, pb_extension_t *extension);
|
||||
|
||||
/* Same as pb_field_iter_begin(), but for const message pointer.
|
||||
* Note that the pointers in pb_field_iter_t will be non-const but shouldn't
|
||||
* be written to when using these functions. */
|
||||
bool pb_field_iter_begin_const(pb_field_iter_t *iter, const pb_msgdesc_t *desc, const void *message);
|
||||
bool pb_field_iter_begin_extension_const(pb_field_iter_t *iter, const pb_extension_t *extension);
|
||||
|
||||
/* Advance the iterator to the next field.
|
||||
* Returns false when the iterator wraps back to the first field. */
|
||||
bool pb_field_iter_next(pb_field_iter_t *iter);
|
||||
|
||||
/* Advance the iterator until it points at a field with the given tag.
|
||||
* Returns false if no such field exists. */
|
||||
bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag);
|
||||
|
||||
/* Find a field with type PB_LTYPE_EXTENSION, or return false if not found.
|
||||
* There can be only one extension range field per message. */
|
||||
bool pb_field_iter_find_extension(pb_field_iter_t *iter);
|
||||
|
||||
#ifdef PB_VALIDATE_UTF8
|
||||
/* Validate UTF-8 text string */
|
||||
bool pb_validate_utf8(const char *s);
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,193 +0,0 @@
|
|||
/* pb_decode.h: Functions to decode protocol buffers. Depends on pb_decode.c.
|
||||
* The main function is pb_decode. You also need an input stream, and the
|
||||
* field descriptions created by nanopb_generator.py.
|
||||
*/
|
||||
|
||||
#ifndef PB_DECODE_H_INCLUDED
|
||||
#define PB_DECODE_H_INCLUDED
|
||||
|
||||
#include "nanopb/pb.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Structure for defining custom input streams. You will need to provide
|
||||
* a callback function to read the bytes from your storage, which can be
|
||||
* for example a file or a network socket.
|
||||
*
|
||||
* The callback must conform to these rules:
|
||||
*
|
||||
* 1) Return false on IO errors. This will cause decoding to abort.
|
||||
* 2) You can use state to store your own data (e.g. buffer pointer),
|
||||
* and rely on pb_read to verify that no-body reads past bytes_left.
|
||||
* 3) Your callback may be used with substreams, in which case bytes_left
|
||||
* is different than from the main stream. Don't use bytes_left to compute
|
||||
* any pointers.
|
||||
*/
|
||||
struct pb_istream_s
|
||||
{
|
||||
#ifdef PB_BUFFER_ONLY
|
||||
/* Callback pointer is not used in buffer-only configuration.
|
||||
* Having an int pointer here allows binary compatibility but
|
||||
* gives an error if someone tries to assign callback function.
|
||||
*/
|
||||
int *callback;
|
||||
#else
|
||||
bool (*callback)(pb_istream_t *stream, pb_byte_t *buf, size_t count);
|
||||
#endif
|
||||
|
||||
void *state; /* Free field for use by callback implementation */
|
||||
size_t bytes_left;
|
||||
|
||||
#ifndef PB_NO_ERRMSG
|
||||
const char *errmsg;
|
||||
#endif
|
||||
};
|
||||
|
||||
#ifndef PB_NO_ERRMSG
|
||||
#define PB_ISTREAM_EMPTY {0,0,0,0}
|
||||
#else
|
||||
#define PB_ISTREAM_EMPTY {0,0,0}
|
||||
#endif
|
||||
|
||||
/***************************
|
||||
* Main decoding functions *
|
||||
***************************/
|
||||
|
||||
/* Decode a single protocol buffers message from input stream into a C structure.
|
||||
* Returns true on success, false on any failure.
|
||||
* The actual struct pointed to by dest must match the description in fields.
|
||||
* Callback fields of the destination structure must be initialized by caller.
|
||||
* All other fields will be initialized by this function.
|
||||
*
|
||||
* Example usage:
|
||||
* MyMessage msg = {};
|
||||
* uint8_t buffer[64];
|
||||
* pb_istream_t stream;
|
||||
*
|
||||
* // ... read some data into buffer ...
|
||||
*
|
||||
* stream = pb_istream_from_buffer(buffer, count);
|
||||
* pb_decode(&stream, MyMessage_fields, &msg);
|
||||
*/
|
||||
bool pb_decode(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct);
|
||||
|
||||
/* Extended version of pb_decode, with several options to control
|
||||
* the decoding process:
|
||||
*
|
||||
* PB_DECODE_NOINIT: Do not initialize the fields to default values.
|
||||
* This is slightly faster if you do not need the default
|
||||
* values and instead initialize the structure to 0 using
|
||||
* e.g. memset(). This can also be used for merging two
|
||||
* messages, i.e. combine already existing data with new
|
||||
* values.
|
||||
*
|
||||
* PB_DECODE_DELIMITED: Input message starts with the message size as varint.
|
||||
* Corresponds to parseDelimitedFrom() in Google's
|
||||
* protobuf API.
|
||||
*
|
||||
* PB_DECODE_NULLTERMINATED: Stop reading when field tag is read as 0. This allows
|
||||
* reading null terminated messages.
|
||||
* NOTE: Until nanopb-0.4.0, pb_decode() also allows
|
||||
* null-termination. This behaviour is not supported in
|
||||
* most other protobuf implementations, so PB_DECODE_DELIMITED
|
||||
* is a better option for compatibility.
|
||||
*
|
||||
* Multiple flags can be combined with bitwise or (| operator)
|
||||
*/
|
||||
#define PB_DECODE_NOINIT 0x01U
|
||||
#define PB_DECODE_DELIMITED 0x02U
|
||||
#define PB_DECODE_NULLTERMINATED 0x04U
|
||||
bool pb_decode_ex(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct, unsigned int flags);
|
||||
|
||||
/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
|
||||
#define pb_decode_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NOINIT)
|
||||
#define pb_decode_delimited(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED)
|
||||
#define pb_decode_delimited_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED | PB_DECODE_NOINIT)
|
||||
#define pb_decode_nullterminated(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NULLTERMINATED)
|
||||
|
||||
/* Release any allocated pointer fields. If you use dynamic allocation, you should
|
||||
* call this for any successfully decoded message when you are done with it. If
|
||||
* pb_decode() returns with an error, the message is already released.
|
||||
*/
|
||||
void pb_release(const pb_msgdesc_t *fields, void *dest_struct);
|
||||
|
||||
/**************************************
|
||||
* Functions for manipulating streams *
|
||||
**************************************/
|
||||
|
||||
/* Create an input stream for reading from a memory buffer.
|
||||
*
|
||||
* msglen should be the actual length of the message, not the full size of
|
||||
* allocated buffer.
|
||||
*
|
||||
* Alternatively, you can use a custom stream that reads directly from e.g.
|
||||
* a file or a network socket.
|
||||
*/
|
||||
pb_istream_t pb_istream_from_buffer(const pb_byte_t *buf, size_t msglen);
|
||||
|
||||
/* Function to read from a pb_istream_t. You can use this if you need to
|
||||
* read some custom header data, or to read data in field callbacks.
|
||||
*/
|
||||
bool pb_read(pb_istream_t *stream, pb_byte_t *buf, size_t count);
|
||||
|
||||
|
||||
/************************************************
|
||||
* Helper functions for writing field callbacks *
|
||||
************************************************/
|
||||
|
||||
/* Decode the tag for the next field in the stream. Gives the wire type and
|
||||
* field tag. At end of the message, returns false and sets eof to true. */
|
||||
bool pb_decode_tag(pb_istream_t *stream, pb_wire_type_t *wire_type, uint32_t *tag, bool *eof);
|
||||
|
||||
/* Skip the field payload data, given the wire type. */
|
||||
bool pb_skip_field(pb_istream_t *stream, pb_wire_type_t wire_type);
|
||||
|
||||
/* Decode an integer in the varint format. This works for enum, int32,
|
||||
* int64, uint32 and uint64 field types. */
|
||||
#ifndef PB_WITHOUT_64BIT
|
||||
bool pb_decode_varint(pb_istream_t *stream, uint64_t *dest);
|
||||
#else
|
||||
#define pb_decode_varint pb_decode_varint32
|
||||
#endif
|
||||
|
||||
/* Decode an integer in the varint format. This works for enum, int32,
|
||||
* and uint32 field types. */
|
||||
bool pb_decode_varint32(pb_istream_t *stream, uint32_t *dest);
|
||||
|
||||
/* Decode a bool value in varint format. */
|
||||
bool pb_decode_bool(pb_istream_t *stream, bool *dest);
|
||||
|
||||
/* Decode an integer in the zig-zagged svarint format. This works for sint32
|
||||
* and sint64. */
|
||||
#ifndef PB_WITHOUT_64BIT
|
||||
bool pb_decode_svarint(pb_istream_t *stream, int64_t *dest);
|
||||
#else
|
||||
bool pb_decode_svarint(pb_istream_t *stream, int32_t *dest);
|
||||
#endif
|
||||
|
||||
/* Decode a fixed32, sfixed32 or float value. You need to pass a pointer to
|
||||
* a 4-byte wide C variable. */
|
||||
bool pb_decode_fixed32(pb_istream_t *stream, void *dest);
|
||||
|
||||
#ifndef PB_WITHOUT_64BIT
|
||||
/* Decode a fixed64, sfixed64 or double value. You need to pass a pointer to
|
||||
* a 8-byte wide C variable. */
|
||||
bool pb_decode_fixed64(pb_istream_t *stream, void *dest);
|
||||
#endif
|
||||
|
||||
#ifdef PB_CONVERT_DOUBLE_FLOAT
|
||||
/* Decode a double value into float variable. */
|
||||
bool pb_decode_double_as_float(pb_istream_t *stream, float *dest);
|
||||
#endif
|
||||
|
||||
/* Make a limited-length substream for reading a PB_WT_STRING field. */
|
||||
bool pb_make_string_substream(pb_istream_t *stream, pb_istream_t *substream);
|
||||
bool pb_close_string_substream(pb_istream_t *stream, pb_istream_t *substream);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,185 +0,0 @@
|
|||
/* pb_encode.h: Functions to encode protocol buffers. Depends on pb_encode.c.
|
||||
* The main function is pb_encode. You also need an output stream, and the
|
||||
* field descriptions created by nanopb_generator.py.
|
||||
*/
|
||||
|
||||
#ifndef PB_ENCODE_H_INCLUDED
|
||||
#define PB_ENCODE_H_INCLUDED
|
||||
|
||||
#include "nanopb/pb.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Structure for defining custom output streams. You will need to provide
|
||||
* a callback function to write the bytes to your storage, which can be
|
||||
* for example a file or a network socket.
|
||||
*
|
||||
* The callback must conform to these rules:
|
||||
*
|
||||
* 1) Return false on IO errors. This will cause encoding to abort.
|
||||
* 2) You can use state to store your own data (e.g. buffer pointer).
|
||||
* 3) pb_write will update bytes_written after your callback runs.
|
||||
* 4) Substreams will modify max_size and bytes_written. Don't use them
|
||||
* to calculate any pointers.
|
||||
*/
|
||||
struct pb_ostream_s
|
||||
{
|
||||
#ifdef PB_BUFFER_ONLY
|
||||
/* Callback pointer is not used in buffer-only configuration.
|
||||
* Having an int pointer here allows binary compatibility but
|
||||
* gives an error if someone tries to assign callback function.
|
||||
* Also, NULL pointer marks a 'sizing stream' that does not
|
||||
* write anything.
|
||||
*/
|
||||
const int *callback;
|
||||
#else
|
||||
bool (*callback)(pb_ostream_t *stream, const pb_byte_t *buf, size_t count);
|
||||
#endif
|
||||
void *state; /* Free field for use by callback implementation. */
|
||||
size_t max_size; /* Limit number of output bytes written (or use SIZE_MAX). */
|
||||
size_t bytes_written; /* Number of bytes written so far. */
|
||||
|
||||
#ifndef PB_NO_ERRMSG
|
||||
const char *errmsg;
|
||||
#endif
|
||||
};
|
||||
|
||||
/***************************
|
||||
* Main encoding functions *
|
||||
***************************/
|
||||
|
||||
/* Encode a single protocol buffers message from C structure into a stream.
|
||||
* Returns true on success, false on any failure.
|
||||
* The actual struct pointed to by src_struct must match the description in fields.
|
||||
* All required fields in the struct are assumed to have been filled in.
|
||||
*
|
||||
* Example usage:
|
||||
* MyMessage msg = {};
|
||||
* uint8_t buffer[64];
|
||||
* pb_ostream_t stream;
|
||||
*
|
||||
* msg.field1 = 42;
|
||||
* stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
|
||||
* pb_encode(&stream, MyMessage_fields, &msg);
|
||||
*/
|
||||
bool pb_encode(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct);
|
||||
|
||||
/* Extended version of pb_encode, with several options to control the
|
||||
* encoding process:
|
||||
*
|
||||
* PB_ENCODE_DELIMITED: Prepend the length of message as a varint.
|
||||
* Corresponds to writeDelimitedTo() in Google's
|
||||
* protobuf API.
|
||||
*
|
||||
* PB_ENCODE_NULLTERMINATED: Append a null byte to the message for termination.
|
||||
* NOTE: This behaviour is not supported in most other
|
||||
* protobuf implementations, so PB_ENCODE_DELIMITED
|
||||
* is a better option for compatibility.
|
||||
*/
|
||||
#define PB_ENCODE_DELIMITED 0x02U
|
||||
#define PB_ENCODE_NULLTERMINATED 0x04U
|
||||
bool pb_encode_ex(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct, unsigned int flags);
|
||||
|
||||
/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
|
||||
#define pb_encode_delimited(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_DELIMITED)
|
||||
#define pb_encode_nullterminated(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_NULLTERMINATED)
|
||||
|
||||
/* Encode the message to get the size of the encoded data, but do not store
|
||||
* the data. */
|
||||
bool pb_get_encoded_size(size_t *size, const pb_msgdesc_t *fields, const void *src_struct);
|
||||
|
||||
/**************************************
|
||||
* Functions for manipulating streams *
|
||||
**************************************/
|
||||
|
||||
/* Create an output stream for writing into a memory buffer.
|
||||
* The number of bytes written can be found in stream.bytes_written after
|
||||
* encoding the message.
|
||||
*
|
||||
* Alternatively, you can use a custom stream that writes directly to e.g.
|
||||
* a file or a network socket.
|
||||
*/
|
||||
pb_ostream_t pb_ostream_from_buffer(pb_byte_t *buf, size_t bufsize);
|
||||
|
||||
/* Pseudo-stream for measuring the size of a message without actually storing
|
||||
* the encoded data.
|
||||
*
|
||||
* Example usage:
|
||||
* MyMessage msg = {};
|
||||
* pb_ostream_t stream = PB_OSTREAM_SIZING;
|
||||
* pb_encode(&stream, MyMessage_fields, &msg);
|
||||
* printf("Message size is %d\n", stream.bytes_written);
|
||||
*/
|
||||
#ifndef PB_NO_ERRMSG
|
||||
#define PB_OSTREAM_SIZING {0,0,0,0,0}
|
||||
#else
|
||||
#define PB_OSTREAM_SIZING {0,0,0,0}
|
||||
#endif
|
||||
|
||||
/* Function to write into a pb_ostream_t stream. You can use this if you need
|
||||
* to append or prepend some custom headers to the message.
|
||||
*/
|
||||
bool pb_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count);
|
||||
|
||||
|
||||
/************************************************
|
||||
* Helper functions for writing field callbacks *
|
||||
************************************************/
|
||||
|
||||
/* Encode field header based on type and field number defined in the field
|
||||
* structure. Call this from the callback before writing out field contents. */
|
||||
bool pb_encode_tag_for_field(pb_ostream_t *stream, const pb_field_iter_t *field);
|
||||
|
||||
/* Encode field header by manually specifying wire type. You need to use this
|
||||
* if you want to write out packed arrays from a callback field. */
|
||||
bool pb_encode_tag(pb_ostream_t *stream, pb_wire_type_t wiretype, uint32_t field_number);
|
||||
|
||||
/* Encode an integer in the varint format.
|
||||
* This works for bool, enum, int32, int64, uint32 and uint64 field types. */
|
||||
#ifndef PB_WITHOUT_64BIT
|
||||
bool pb_encode_varint(pb_ostream_t *stream, uint64_t value);
|
||||
#else
|
||||
bool pb_encode_varint(pb_ostream_t *stream, uint32_t value);
|
||||
#endif
|
||||
|
||||
/* Encode an integer in the zig-zagged svarint format.
|
||||
* This works for sint32 and sint64. */
|
||||
#ifndef PB_WITHOUT_64BIT
|
||||
bool pb_encode_svarint(pb_ostream_t *stream, int64_t value);
|
||||
#else
|
||||
bool pb_encode_svarint(pb_ostream_t *stream, int32_t value);
|
||||
#endif
|
||||
|
||||
/* Encode a string or bytes type field. For strings, pass strlen(s) as size. */
|
||||
bool pb_encode_string(pb_ostream_t *stream, const pb_byte_t *buffer, size_t size);
|
||||
|
||||
/* Encode a fixed32, sfixed32 or float value.
|
||||
* You need to pass a pointer to a 4-byte wide C variable. */
|
||||
bool pb_encode_fixed32(pb_ostream_t *stream, const void *value);
|
||||
|
||||
#ifndef PB_WITHOUT_64BIT
|
||||
/* Encode a fixed64, sfixed64 or double value.
|
||||
* You need to pass a pointer to a 8-byte wide C variable. */
|
||||
bool pb_encode_fixed64(pb_ostream_t *stream, const void *value);
|
||||
#endif
|
||||
|
||||
#ifdef PB_CONVERT_DOUBLE_FLOAT
|
||||
/* Encode a float value so that it appears like a double in the encoded
|
||||
* message. */
|
||||
bool pb_encode_float_as_double(pb_ostream_t *stream, float value);
|
||||
#endif
|
||||
|
||||
/* Encode a submessage field.
|
||||
* You need to pass the pb_field_t array and pointer to struct, just like
|
||||
* with pb_encode(). This internally encodes the submessage twice, first to
|
||||
* calculate message size and then to actually write it out.
|
||||
*/
|
||||
bool pb_encode_submessage(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
/* Automatically generated nanopb constant definitions */
|
||||
/* Generated by nanopb-0.4.8-dev */
|
||||
|
||||
#include "opentelemetry/common.pb.h"
|
||||
#if PB_PROTO_HEADER_VERSION != 40
|
||||
#error Regenerate this file with the current version of nanopb generator.
|
||||
#endif
|
||||
|
||||
PB_BIND(opentelemetry_proto_common_v1_AnyValue, opentelemetry_proto_common_v1_AnyValue, AUTO)
|
||||
|
||||
|
||||
PB_BIND(opentelemetry_proto_common_v1_ArrayValue, opentelemetry_proto_common_v1_ArrayValue, AUTO)
|
||||
|
||||
|
||||
PB_BIND(opentelemetry_proto_common_v1_KeyValueList, opentelemetry_proto_common_v1_KeyValueList, AUTO)
|
||||
|
||||
|
||||
PB_BIND(opentelemetry_proto_common_v1_KeyValue, opentelemetry_proto_common_v1_KeyValue, AUTO)
|
||||
|
||||
|
||||
PB_BIND(opentelemetry_proto_common_v1_InstrumentationScope, opentelemetry_proto_common_v1_InstrumentationScope, AUTO)
|
||||
|
||||
|
||||
|
||||
#ifndef PB_CONVERT_DOUBLE_FLOAT
|
||||
/* On some platforms (such as AVR), double is really float.
|
||||
* To be able to encode/decode double on these platforms, you need.
|
||||
* to define PB_CONVERT_DOUBLE_FLOAT in pb.h or compiler command line.
|
||||
*/
|
||||
PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
|
||||
#endif
|
||||
|
||||
|
|
@ -1,170 +0,0 @@
|
|||
/* Automatically generated nanopb header */
|
||||
/* Generated by nanopb-0.4.8-dev */
|
||||
|
||||
#ifndef PB_OPENTELEMETRY_PROTO_COMMON_V1_OPENTELEMETRY_PROTO_COMMON_V1_COMMON_PB_H_INCLUDED
|
||||
#define PB_OPENTELEMETRY_PROTO_COMMON_V1_OPENTELEMETRY_PROTO_COMMON_V1_COMMON_PB_H_INCLUDED
|
||||
#include <nanopb/pb.h>
|
||||
|
||||
#if PB_PROTO_HEADER_VERSION != 40
|
||||
#error Regenerate this file with the current version of nanopb generator.
|
||||
#endif
|
||||
|
||||
/* Struct definitions */
|
||||
/* ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
|
||||
since oneof in AnyValue does not allow repeated fields. */
|
||||
typedef struct _opentelemetry_proto_common_v1_ArrayValue {
|
||||
/* Array of values. The array may be empty (contain 0 elements). */
|
||||
pb_callback_t values;
|
||||
} opentelemetry_proto_common_v1_ArrayValue;
|
||||
|
||||
/* KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
|
||||
since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
|
||||
a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
|
||||
avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
|
||||
are semantically equivalent. */
|
||||
typedef struct _opentelemetry_proto_common_v1_KeyValueList {
|
||||
/* A collection of key/value pairs of key-value pairs. The list may be empty (may
|
||||
contain 0 elements).
|
||||
The keys MUST be unique (it is not allowed to have more than one
|
||||
value with the same key). */
|
||||
pb_callback_t values;
|
||||
} opentelemetry_proto_common_v1_KeyValueList;
|
||||
|
||||
/* AnyValue is used to represent any type of attribute value. AnyValue may contain a
|
||||
primitive value such as a string or integer or it may contain an arbitrary nested
|
||||
object containing arrays, key-value lists and primitives. */
|
||||
typedef struct _opentelemetry_proto_common_v1_AnyValue {
|
||||
pb_size_t which_value;
|
||||
union {
|
||||
pb_callback_t string_value;
|
||||
bool bool_value;
|
||||
int64_t int_value;
|
||||
double double_value;
|
||||
opentelemetry_proto_common_v1_ArrayValue array_value;
|
||||
opentelemetry_proto_common_v1_KeyValueList kvlist_value;
|
||||
pb_callback_t bytes_value;
|
||||
} value;
|
||||
} opentelemetry_proto_common_v1_AnyValue;
|
||||
|
||||
/* KeyValue is a key-value pair that is used to store Span attributes, Link
|
||||
attributes, etc. */
|
||||
typedef struct _opentelemetry_proto_common_v1_KeyValue {
|
||||
pb_callback_t key;
|
||||
bool has_value;
|
||||
opentelemetry_proto_common_v1_AnyValue value;
|
||||
} opentelemetry_proto_common_v1_KeyValue;
|
||||
|
||||
/* InstrumentationScope is a message representing the instrumentation scope information
|
||||
such as the fully qualified name and version. */
|
||||
typedef struct _opentelemetry_proto_common_v1_InstrumentationScope {
|
||||
/* An empty instrumentation scope name means the name is unknown. */
|
||||
pb_callback_t name;
|
||||
pb_callback_t version;
|
||||
/* Additional attributes that describe the scope. [Optional].
|
||||
Attribute keys MUST be unique (it is not allowed to have more than one
|
||||
attribute with the same key). */
|
||||
pb_callback_t attributes;
|
||||
uint32_t dropped_attributes_count;
|
||||
} opentelemetry_proto_common_v1_InstrumentationScope;
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Initializer values for message structs */
|
||||
#define opentelemetry_proto_common_v1_AnyValue_init_default {0, {{{NULL}, NULL}}}
|
||||
#define opentelemetry_proto_common_v1_ArrayValue_init_default {{{NULL}, NULL}}
|
||||
#define opentelemetry_proto_common_v1_KeyValueList_init_default {{{NULL}, NULL}}
|
||||
#define opentelemetry_proto_common_v1_KeyValue_init_default {{{NULL}, NULL}, false, opentelemetry_proto_common_v1_AnyValue_init_default}
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0}
|
||||
#define opentelemetry_proto_common_v1_AnyValue_init_zero {0, {{{NULL}, NULL}}}
|
||||
#define opentelemetry_proto_common_v1_ArrayValue_init_zero {{{NULL}, NULL}}
|
||||
#define opentelemetry_proto_common_v1_KeyValueList_init_zero {{{NULL}, NULL}}
|
||||
#define opentelemetry_proto_common_v1_KeyValue_init_zero {{{NULL}, NULL}, false, opentelemetry_proto_common_v1_AnyValue_init_zero}
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0}
|
||||
|
||||
/* Field tags (for use in manual encoding/decoding) */
|
||||
#define opentelemetry_proto_common_v1_ArrayValue_values_tag 1
|
||||
#define opentelemetry_proto_common_v1_KeyValueList_values_tag 1
|
||||
#define opentelemetry_proto_common_v1_AnyValue_string_value_tag 1
|
||||
#define opentelemetry_proto_common_v1_AnyValue_bool_value_tag 2
|
||||
#define opentelemetry_proto_common_v1_AnyValue_int_value_tag 3
|
||||
#define opentelemetry_proto_common_v1_AnyValue_double_value_tag 4
|
||||
#define opentelemetry_proto_common_v1_AnyValue_array_value_tag 5
|
||||
#define opentelemetry_proto_common_v1_AnyValue_kvlist_value_tag 6
|
||||
#define opentelemetry_proto_common_v1_AnyValue_bytes_value_tag 7
|
||||
#define opentelemetry_proto_common_v1_KeyValue_key_tag 1
|
||||
#define opentelemetry_proto_common_v1_KeyValue_value_tag 2
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_name_tag 1
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_version_tag 2
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_attributes_tag 3
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_dropped_attributes_count_tag 4
|
||||
|
||||
/* Struct field encoding specification for nanopb */
|
||||
#define opentelemetry_proto_common_v1_AnyValue_FIELDLIST(X, a) \
|
||||
X(a, CALLBACK, ONEOF, STRING, (value,string_value,value.string_value), 1) \
|
||||
X(a, STATIC, ONEOF, BOOL, (value,bool_value,value.bool_value), 2) \
|
||||
X(a, STATIC, ONEOF, INT64, (value,int_value,value.int_value), 3) \
|
||||
X(a, STATIC, ONEOF, DOUBLE, (value,double_value,value.double_value), 4) \
|
||||
X(a, STATIC, ONEOF, MESSAGE, (value,array_value,value.array_value), 5) \
|
||||
X(a, STATIC, ONEOF, MESSAGE, (value,kvlist_value,value.kvlist_value), 6) \
|
||||
X(a, CALLBACK, ONEOF, BYTES, (value,bytes_value,value.bytes_value), 7)
|
||||
#define opentelemetry_proto_common_v1_AnyValue_CALLBACK pb_default_field_callback
|
||||
#define opentelemetry_proto_common_v1_AnyValue_DEFAULT NULL
|
||||
#define opentelemetry_proto_common_v1_AnyValue_value_array_value_MSGTYPE opentelemetry_proto_common_v1_ArrayValue
|
||||
#define opentelemetry_proto_common_v1_AnyValue_value_kvlist_value_MSGTYPE opentelemetry_proto_common_v1_KeyValueList
|
||||
|
||||
#define opentelemetry_proto_common_v1_ArrayValue_FIELDLIST(X, a) \
|
||||
X(a, CALLBACK, REPEATED, MESSAGE, values, 1)
|
||||
#define opentelemetry_proto_common_v1_ArrayValue_CALLBACK pb_default_field_callback
|
||||
#define opentelemetry_proto_common_v1_ArrayValue_DEFAULT NULL
|
||||
#define opentelemetry_proto_common_v1_ArrayValue_values_MSGTYPE opentelemetry_proto_common_v1_AnyValue
|
||||
|
||||
#define opentelemetry_proto_common_v1_KeyValueList_FIELDLIST(X, a) \
|
||||
X(a, CALLBACK, REPEATED, MESSAGE, values, 1)
|
||||
#define opentelemetry_proto_common_v1_KeyValueList_CALLBACK pb_default_field_callback
|
||||
#define opentelemetry_proto_common_v1_KeyValueList_DEFAULT NULL
|
||||
#define opentelemetry_proto_common_v1_KeyValueList_values_MSGTYPE opentelemetry_proto_common_v1_KeyValue
|
||||
|
||||
#define opentelemetry_proto_common_v1_KeyValue_FIELDLIST(X, a) \
|
||||
X(a, CALLBACK, SINGULAR, STRING, key, 1) \
|
||||
X(a, STATIC, OPTIONAL, MESSAGE, value, 2)
|
||||
#define opentelemetry_proto_common_v1_KeyValue_CALLBACK pb_default_field_callback
|
||||
#define opentelemetry_proto_common_v1_KeyValue_DEFAULT NULL
|
||||
#define opentelemetry_proto_common_v1_KeyValue_value_MSGTYPE opentelemetry_proto_common_v1_AnyValue
|
||||
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_FIELDLIST(X, a) \
|
||||
X(a, CALLBACK, SINGULAR, STRING, name, 1) \
|
||||
X(a, CALLBACK, SINGULAR, STRING, version, 2) \
|
||||
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 3) \
|
||||
X(a, STATIC, SINGULAR, UINT32, dropped_attributes_count, 4)
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_CALLBACK pb_default_field_callback
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_DEFAULT NULL
|
||||
#define opentelemetry_proto_common_v1_InstrumentationScope_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

extern const pb_msgdesc_t opentelemetry_proto_common_v1_AnyValue_msg;
extern const pb_msgdesc_t opentelemetry_proto_common_v1_ArrayValue_msg;
extern const pb_msgdesc_t opentelemetry_proto_common_v1_KeyValueList_msg;
extern const pb_msgdesc_t opentelemetry_proto_common_v1_KeyValue_msg;
extern const pb_msgdesc_t opentelemetry_proto_common_v1_InstrumentationScope_msg;

/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
#define opentelemetry_proto_common_v1_AnyValue_fields &opentelemetry_proto_common_v1_AnyValue_msg
#define opentelemetry_proto_common_v1_ArrayValue_fields &opentelemetry_proto_common_v1_ArrayValue_msg
#define opentelemetry_proto_common_v1_KeyValueList_fields &opentelemetry_proto_common_v1_KeyValueList_msg
#define opentelemetry_proto_common_v1_KeyValue_fields &opentelemetry_proto_common_v1_KeyValue_msg
#define opentelemetry_proto_common_v1_InstrumentationScope_fields &opentelemetry_proto_common_v1_InstrumentationScope_msg

/* Maximum encoded size of messages (where known) */
/* opentelemetry_proto_common_v1_AnyValue_size depends on runtime parameters */
/* opentelemetry_proto_common_v1_ArrayValue_size depends on runtime parameters */
/* opentelemetry_proto_common_v1_KeyValueList_size depends on runtime parameters */
/* opentelemetry_proto_common_v1_KeyValue_size depends on runtime parameters */
/* opentelemetry_proto_common_v1_InstrumentationScope_size depends on runtime parameters */

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif

@ -1,2 +0,0 @@
# Needed to generate callbacks for data types within Metrics, which aren't generated for oneof types by default
opentelemetry.proto.metrics.v1.Metric submsg_callback:true;

@ -1,67 +0,0 @@
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.4.8-dev */

#include "opentelemetry/metrics.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

PB_BIND(opentelemetry_proto_metrics_v1_MetricsData, opentelemetry_proto_metrics_v1_MetricsData, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_ResourceMetrics, opentelemetry_proto_metrics_v1_ResourceMetrics, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_ScopeMetrics, opentelemetry_proto_metrics_v1_ScopeMetrics, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_Metric, opentelemetry_proto_metrics_v1_Metric, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_Gauge, opentelemetry_proto_metrics_v1_Gauge, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_Sum, opentelemetry_proto_metrics_v1_Sum, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_Histogram, opentelemetry_proto_metrics_v1_Histogram, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogram, opentelemetry_proto_metrics_v1_ExponentialHistogram, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_Summary, opentelemetry_proto_metrics_v1_Summary, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_NumberDataPoint, opentelemetry_proto_metrics_v1_NumberDataPoint, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_HistogramDataPoint, opentelemetry_proto_metrics_v1_HistogramDataPoint, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_SummaryDataPoint, opentelemetry_proto_metrics_v1_SummaryDataPoint, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile, opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile, AUTO)

PB_BIND(opentelemetry_proto_metrics_v1_Exemplar, opentelemetry_proto_metrics_v1_Exemplar, AUTO)

#ifndef PB_CONVERT_DOUBLE_FLOAT
/* On some platforms (such as AVR), double is really float.
 * To be able to encode/decode double on these platforms, you need
 * to define PB_CONVERT_DOUBLE_FLOAT in pb.h or compiler command line.
 */
PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
#endif

@ -1,966 +0,0 @@
/* Automatically generated nanopb header */
/* Generated by nanopb-0.4.8-dev */

#ifndef PB_OPENTELEMETRY_PROTO_METRICS_V1_OPENTELEMETRY_PROTO_METRICS_V1_METRICS_PB_H_INCLUDED
#define PB_OPENTELEMETRY_PROTO_METRICS_V1_OPENTELEMETRY_PROTO_METRICS_V1_METRICS_PB_H_INCLUDED
#include <nanopb/pb.h>
#include "opentelemetry/common.pb.h"
#include "opentelemetry/resource.pb.h"

#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

/* Enum definitions */
/* AggregationTemporality defines how a metric aggregator reports aggregated
 values. It describes how those values relate to the time interval over
 which they are aggregated. */
typedef enum _opentelemetry_proto_metrics_v1_AggregationTemporality {
    /* UNSPECIFIED is the default AggregationTemporality, it MUST not be used. */
    opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED = 0,
    /* DELTA is an AggregationTemporality for a metric aggregator which reports
 changes since last report time. Successive metrics contain aggregation of
 values from continuous and non-overlapping intervals.

 The values for a DELTA metric are based only on the time interval
 associated with one measurement cycle. There is no dependency on
 previous measurements like is the case for CUMULATIVE metrics.

 For example, consider a system measuring the number of requests that
 it receives and reports the sum of these requests every second as a
 DELTA metric:

   1. The system starts receiving at time=t_0.
   2. A request is received, the system measures 1 request.
   3. A request is received, the system measures 1 request.
   4. A request is received, the system measures 1 request.
   5. The 1 second collection cycle ends. A metric is exported for the
      number of requests received over the interval of time t_0 to
      t_0+1 with a value of 3.
   6. A request is received, the system measures 1 request.
   7. A request is received, the system measures 1 request.
   8. The 1 second collection cycle ends. A metric is exported for the
      number of requests received over the interval of time t_0+1 to
      t_0+2 with a value of 2. */
    opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA = 1,
    /* CUMULATIVE is an AggregationTemporality for a metric aggregator which
 reports changes since a fixed start time. This means that current values
 of a CUMULATIVE metric depend on all previous measurements since the
 start time. Because of this, the sender is required to retain this state
 in some form. If this state is lost or invalidated, the CUMULATIVE metric
 values MUST be reset and a new fixed start time following the last
 reported measurement time sent MUST be used.

 For example, consider a system measuring the number of requests that
 it receives and reports the sum of these requests every second as a
 CUMULATIVE metric:

   1. The system starts receiving at time=t_0.
   2. A request is received, the system measures 1 request.
   3. A request is received, the system measures 1 request.
   4. A request is received, the system measures 1 request.
   5. The 1 second collection cycle ends. A metric is exported for the
      number of requests received over the interval of time t_0 to
      t_0+1 with a value of 3.
   6. A request is received, the system measures 1 request.
   7. A request is received, the system measures 1 request.
   8. The 1 second collection cycle ends. A metric is exported for the
      number of requests received over the interval of time t_0 to
      t_0+2 with a value of 5.
   9. The system experiences a fault and loses state.
   10. The system recovers and resumes receiving at time=t_1.
   11. A request is received, the system measures 1 request.
   12. The 1 second collection cycle ends. A metric is exported for the
       number of requests received over the interval of time t_1 to
       t_1+1 with a value of 1.

 Note: Even though, when reporting changes since last report time, using
 CUMULATIVE is valid, it is not recommended. This may cause problems for
 systems that do not use start_time to determine when the aggregation
 value was reset (e.g. Prometheus). */
    opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE = 2
} opentelemetry_proto_metrics_v1_AggregationTemporality;
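
/* [Editor's illustrative sketch, not part of the generated header.]
 * The DELTA example above exports per-interval values (3, then 2), while
 * the CUMULATIVE example exports running totals (3, then 5). While the
 * fixed start time is unchanged, a cumulative series converts to deltas
 * with one subtraction per interval: 5 - 3 = 2 for the second interval. */
static inline int64_t example_cumulative_to_delta(int64_t prev_total,
                                                  int64_t cur_total) {
    /* Only valid while no state loss has reset the fixed start time. */
    return cur_total - prev_total;
}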

/* DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
 bit-field representing 32 distinct boolean flags. Each flag defined in this
 enum is a bit-mask. To test the presence of a single flag in the flags of
 a data point, for example, use an expression like:

   (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK */
typedef enum _opentelemetry_proto_metrics_v1_DataPointFlags {
    /* The zero value for the enum. Should not be used for comparisons.
 Instead use bitwise "and" with the appropriate mask as shown above. */
    opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE = 0,
    /* This DataPoint is valid but has no recorded value. This value
 SHOULD be used to reflect explicitly missing data in a series, as
 for an equivalent to the Prometheus "staleness marker". */
    opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1
} opentelemetry_proto_metrics_v1_DataPointFlags;
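
/* [Editor's illustrative sketch, not part of the generated header.]
 * Testing the staleness marker on a data point's flags field with the
 * bit-mask expression described above: */
static inline bool example_no_recorded_value(uint32_t flags) {
    return (flags & opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK)
        == opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK;
}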

/* Struct definitions */
/* MetricsData represents the metrics data that can be stored in a persistent
 storage, OR can be embedded by other protocols that transfer OTLP metrics
 data but do not implement the OTLP protocol.

 The main difference between this message and collector protocol is that
 in this message there will not be any "control" or "metadata" specific to
 OTLP protocol.

 When new fields are added into this message, the OTLP request MUST be updated
 as well. */
typedef struct _opentelemetry_proto_metrics_v1_MetricsData {
    /* An array of ResourceMetrics.
 For data coming from a single resource this array will typically contain
 one element. Intermediary nodes that receive data from multiple origins
 typically batch the data before forwarding further and in that case this
 array will contain multiple elements. */
    pb_callback_t resource_metrics;
} opentelemetry_proto_metrics_v1_MetricsData;

/* A collection of ScopeMetrics from a Resource. */
typedef struct _opentelemetry_proto_metrics_v1_ResourceMetrics {
    /* The resource for the metrics in this message.
 If this field is not set then no resource info is known. */
    bool has_resource;
    opentelemetry_proto_resource_v1_Resource resource;
    /* A list of metrics that originate from a resource. */
    pb_callback_t scope_metrics;
    /* This schema_url applies to the data in the "resource" field. It does not apply
 to the data in the "scope_metrics" field which have their own schema_url field. */
    pb_callback_t schema_url;
} opentelemetry_proto_metrics_v1_ResourceMetrics;

/* A collection of Metrics produced by a Scope. */
typedef struct _opentelemetry_proto_metrics_v1_ScopeMetrics {
    /* The instrumentation scope information for the metrics in this message.
 Semantically when InstrumentationScope isn't set, it is equivalent to
 an empty instrumentation scope name (unknown). */
    bool has_scope;
    opentelemetry_proto_common_v1_InstrumentationScope scope;
    /* A list of metrics that originate from an instrumentation library. */
    pb_callback_t metrics;
    /* This schema_url applies to all metrics in the "metrics" field. */
    pb_callback_t schema_url;
} opentelemetry_proto_metrics_v1_ScopeMetrics;

/* Gauge represents the type of a scalar metric that always exports the
 "current value" for every data point. It should be used for an "unknown"
 aggregation.

 A Gauge does not support different aggregation temporalities. Given the
 aggregation is unknown, points cannot be combined using the same
 aggregation, regardless of aggregation temporalities. Therefore,
 AggregationTemporality is not included. Consequently, this also means
 "StartTimeUnixNano" is ignored for all data points. */
typedef struct _opentelemetry_proto_metrics_v1_Gauge {
    pb_callback_t data_points;
} opentelemetry_proto_metrics_v1_Gauge;

/* Sum represents the type of a scalar metric that is calculated as a sum of all
 reported measurements over a time interval. */
typedef struct _opentelemetry_proto_metrics_v1_Sum {
    pb_callback_t data_points;
    /* aggregation_temporality describes if the aggregator reports delta changes
 since last report time, or cumulative changes since a fixed start time. */
    opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
    /* If "true" means that the sum is monotonic. */
    bool is_monotonic;
} opentelemetry_proto_metrics_v1_Sum;

/* Histogram represents the type of a metric that is calculated by aggregating
 as a Histogram of all reported measurements over a time interval. */
typedef struct _opentelemetry_proto_metrics_v1_Histogram {
    pb_callback_t data_points;
    /* aggregation_temporality describes if the aggregator reports delta changes
 since last report time, or cumulative changes since a fixed start time. */
    opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
} opentelemetry_proto_metrics_v1_Histogram;

/* ExponentialHistogram represents the type of a metric that is calculated by aggregating
 as an ExponentialHistogram of all reported double measurements over a time interval. */
typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogram {
    pb_callback_t data_points;
    /* aggregation_temporality describes if the aggregator reports delta changes
 since last report time, or cumulative changes since a fixed start time. */
    opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
} opentelemetry_proto_metrics_v1_ExponentialHistogram;

/* Summary metric data are used to convey quantile summaries,
 a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
 and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
 data type. These data points cannot always be merged in a meaningful way.
 While they can be useful in some applications, histogram data points are
 recommended for new applications. */
typedef struct _opentelemetry_proto_metrics_v1_Summary {
    pb_callback_t data_points;
} opentelemetry_proto_metrics_v1_Summary;

/* Defines a Metric which has one or more timeseries. The following is a
 brief summary of the Metric data model. For more details, see:

   https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md

 The data model and relation between entities is shown in the
 diagram below. Here, "DataPoint" is the term used to refer to any
 one of the specific data point value types, and "points" is the term used
 to refer to any one of the lists of points contained in the Metric.

 - Metric is composed of metadata and data.
 - Metadata part contains a name, description, unit.
 - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
 - DataPoint contains timestamps, attributes, and one of the possible value type
   fields.

     Metric
  +------------+
  |name        |
  |description |
  |unit        |     +------------------------------------+
  |data        |---> |Gauge, Sum, Histogram, Summary, ... |
  +------------+     +------------------------------------+

    Data [One of Gauge, Sum, Histogram, Summary, ...]
  +-----------+
  |...        |  // Metadata about the Data.
  |points     |--+
  +-----------+  |
                 |      +---------------------------+
                 |      |DataPoint 1                |
                 v      |+------+------+   +------+ |
              +-----+   ||label |label |...|label | |
              |  1  |-->||value1|value2|...|valueN| |
              +-----+   |+------+------+   +------+ |
              |  .  |   |+-----+                    |
              |  .  |   ||value|                    |
              |  .  |   |+-----+                    |
              |  .  |   +---------------------------+
              |  .  |                .
              |  .  |                .
              |  .  |                .
              |  .  |   +---------------------------+
              |  .  |   |DataPoint M                |
              +-----+   |+------+------+   +------+ |
              |  M  |-->||label |label |...|label | |
              +-----+   ||value1|value2|...|valueN| |
                        |+------+------+   +------+ |
                        |+-----+                    |
                        ||value|                    |
                        |+-----+                    |
                        +---------------------------+

 Each distinct type of DataPoint represents the output of a specific
 aggregation function, the result of applying the DataPoint's
 associated function to one or more measurements.

 All DataPoint types have three common fields:
 - Attributes includes key-value pairs associated with the data point
 - TimeUnixNano is required, set to the end time of the aggregation
 - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
   having an AggregationTemporality field, as discussed below.

 Both TimeUnixNano and StartTimeUnixNano values are expressed as
 UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.

 # TimeUnixNano

 This field is required, having consistent interpretation across
 DataPoint types. TimeUnixNano is the moment corresponding to when
 the data point's aggregate value was captured.

 Data points with the 0 value for TimeUnixNano SHOULD be rejected
 by consumers.

 # StartTimeUnixNano

 StartTimeUnixNano in general allows detecting when a sequence of
 observations is unbroken. This field indicates to consumers the
 start time for points with cumulative and delta
 AggregationTemporality, and it should be included whenever possible
 to support correct rate calculation. Although it may be omitted
 when the start time is truly unknown, setting StartTimeUnixNano is
 strongly encouraged. */
typedef struct _opentelemetry_proto_metrics_v1_Metric {
    /* name of the metric, including its DNS name prefix. It must be unique. */
    pb_callback_t name;
    /* description of the metric, which can be used in documentation. */
    pb_callback_t description;
    /* unit in which the metric value is reported. Follows the format
 described by http://unitsofmeasure.org/ucum.html. */
    pb_callback_t unit;
    pb_callback_t cb_data;
    pb_size_t which_data;
    union {
        opentelemetry_proto_metrics_v1_Gauge gauge;
        opentelemetry_proto_metrics_v1_Sum sum;
        opentelemetry_proto_metrics_v1_Histogram histogram;
        opentelemetry_proto_metrics_v1_ExponentialHistogram exponential_histogram;
        opentelemetry_proto_metrics_v1_Summary summary;
    } data;
} opentelemetry_proto_metrics_v1_Metric;
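
/* [Editor's illustrative sketch, not part of the generated header.]
 * The active member of the anonymous `data` union is selected through
 * `which_data`, set to the field tag of the chosen oneof member (the
 * *_tag macros defined later in this header). Installing a gauge: */
static void example_set_gauge(opentelemetry_proto_metrics_v1_Metric *metric) {
    metric->which_data = opentelemetry_proto_metrics_v1_Metric_gauge_tag; /* tag 5 */
    metric->data.gauge.data_points.funcs.encode = NULL; /* wire an encode callback here */
    metric->data.gauge.data_points.arg = NULL;
}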

/* NumberDataPoint is a single data point in a timeseries that describes the
 time-varying scalar value of a metric. */
typedef struct _opentelemetry_proto_metrics_v1_NumberDataPoint {
    /* StartTimeUnixNano is optional but strongly encouraged, see the
 detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t start_time_unix_nano;
    /* TimeUnixNano is required, see the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    pb_size_t which_value;
    union {
        double as_double;
        int64_t as_int;
    } value;
    /* (Optional) List of exemplars collected from
 measurements that were used to form the data point */
    pb_callback_t exemplars;
    /* The set of key/value pairs that uniquely identify the timeseries from
 where this point belongs. The list may be empty (may contain 0 elements).
 Attribute keys MUST be unique (it is not allowed to have more than one
 attribute with the same key). */
    pb_callback_t attributes;
    /* Flags that apply to this specific data point. See DataPointFlags
 for the available flags and their meaning. */
    uint32_t flags;
} opentelemetry_proto_metrics_v1_NumberDataPoint;

/* HistogramDataPoint is a single data point in a timeseries that describes the
 time-varying values of a Histogram. A Histogram contains summary statistics
 for a population of values; it may optionally contain the distribution of
 those values across a set of buckets.

 If the histogram contains the distribution of values, then both
 "explicit_bounds" and "bucket_counts" fields must be defined.
 If the histogram does not contain the distribution of values, then both
 "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
 "sum" are known. */
typedef struct _opentelemetry_proto_metrics_v1_HistogramDataPoint {
    /* StartTimeUnixNano is optional but strongly encouraged, see the
 detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t start_time_unix_nano;
    /* TimeUnixNano is required, see the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    /* count is the number of values in the population. Must be non-negative. This
 value must be equal to the sum of the "count" fields in buckets if a
 histogram is provided. */
    uint64_t count;
    /* sum of the values in the population. If count is zero then this field
 must be zero.

 Note: Sum should only be filled out when measuring non-negative discrete
 events, and is assumed to be monotonic over the values of these events.
 Negative events *can* be recorded, but sum should not be filled out when
 doing so. This is specifically to enforce compatibility w/ OpenMetrics,
 see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram */
    bool has_sum;
    double sum;
    /* bucket_counts is an optional field that contains the count values of the
 histogram for each bucket.

 The sum of the bucket_counts must equal the value in the count field.

 The number of elements in the bucket_counts array must be one greater than
 the number of elements in the explicit_bounds array. */
    pb_callback_t bucket_counts;
    /* explicit_bounds specifies buckets with explicitly defined bounds for values.

 The boundaries for bucket at index i are:

 (-infinity, explicit_bounds[i]] for i == 0
 (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
 (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)

 The values in the explicit_bounds array must be strictly increasing.

 Histogram buckets are inclusive of their upper boundary, except the last
 bucket where the boundary is at infinity. This format is intentionally
 compatible with the OpenMetrics histogram definition. */
    pb_callback_t explicit_bounds;
    /* (Optional) List of exemplars collected from
 measurements that were used to form the data point */
    pb_callback_t exemplars;
    /* The set of key/value pairs that uniquely identify the timeseries from
 where this point belongs. The list may be empty (may contain 0 elements).
 Attribute keys MUST be unique (it is not allowed to have more than one
 attribute with the same key). */
    pb_callback_t attributes;
    /* Flags that apply to this specific data point. See DataPointFlags
 for the available flags and their meaning. */
    uint32_t flags;
    /* min is the minimum value over (start_time, end_time]. */
    bool has_min;
    double min;
    /* max is the maximum value over (start_time, end_time]. */
    bool has_max;
    double max;
} opentelemetry_proto_metrics_v1_HistogramDataPoint;
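
/* [Editor's illustrative sketch, not part of the generated header.]
 * Mapping a raw measurement to a bucket index under the boundary rules
 * documented above, given explicit_bounds materialized as a plain array: */
static size_t example_explicit_bucket_index(double value,
                                            const double *bounds,
                                            size_t n_bounds) {
    size_t i = 0;
    /* Bucket i is (bounds[i-1], bounds[i]]; the final bucket is open above. */
    while (i < n_bounds && value > bounds[i])
        i++;
    return i; /* in [0, n_bounds]: there are n_bounds + 1 buckets */
}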

/* Buckets are a set of bucket counts, encoded in a contiguous array
 of counts. */
typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets {
    /* Offset is the bucket index of the first entry in the bucket_counts array.

 Note: This uses a varint encoding as a simple form of compression. */
    int32_t offset;
    /* bucket_counts is an array of count values, where bucket_counts[i] carries
 the count of the bucket at index (offset+i). bucket_counts[i] is the count
 of values greater than base^(offset+i) and less than or equal to
 base^(offset+i+1).

 Note: By contrast, the explicit HistogramDataPoint uses
 fixed64. This field is expected to have many buckets,
 especially zeros, so uint64 has been selected to ensure
 varint encoding. */
    pb_callback_t bucket_counts;
} opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets;

/* ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
 time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
 summary statistics for a population of values; it may optionally contain the
 distribution of those values across a set of buckets. */
typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint {
    /* The set of key/value pairs that uniquely identify the timeseries from
 where this point belongs. The list may be empty (may contain 0 elements).
 Attribute keys MUST be unique (it is not allowed to have more than one
 attribute with the same key). */
    pb_callback_t attributes;
    /* StartTimeUnixNano is optional but strongly encouraged, see the
 detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t start_time_unix_nano;
    /* TimeUnixNano is required, see the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    /* count is the number of values in the population. Must be
 non-negative. This value must be equal to the sum of the "bucket_counts"
 values in the positive and negative Buckets plus the "zero_count" field. */
    uint64_t count;
    /* sum of the values in the population. If count is zero then this field
 must be zero.

 Note: Sum should only be filled out when measuring non-negative discrete
 events, and is assumed to be monotonic over the values of these events.
 Negative events *can* be recorded, but sum should not be filled out when
 doing so. This is specifically to enforce compatibility w/ OpenMetrics,
 see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram */
    bool has_sum;
    double sum;
    /* scale describes the resolution of the histogram. Boundaries are
 located at powers of the base, where:

   base = (2^(2^-scale))

 The histogram bucket identified by `index`, a signed integer,
 contains values that are greater than (base^index) and
 less than or equal to (base^(index+1)).

 The positive and negative ranges of the histogram are expressed
 separately. Negative values are mapped by their absolute value
 into the negative range using the same scale as the positive range.

 scale is not restricted by the protocol, as the permissible
 values depend on the range of the data. */
    int32_t scale;
    /* zero_count is the count of values that are either exactly zero or
 within the region considered zero by the instrumentation at the
 tolerated degree of precision. This bucket stores values that
 cannot be expressed using the standard exponential formula as
 well as values that have been rounded to zero.

 Implementations MAY consider the zero bucket to have probability
 mass equal to (zero_count / count). */
    uint64_t zero_count;
    /* positive carries the positive range of exponential bucket counts. */
    bool has_positive;
    opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets positive;
    /* negative carries the negative range of exponential bucket counts. */
    bool has_negative;
    opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets negative;
    /* Flags that apply to this specific data point. See DataPointFlags
 for the available flags and their meaning. */
    uint32_t flags;
    /* (Optional) List of exemplars collected from
 measurements that were used to form the data point */
    pb_callback_t exemplars;
    /* min is the minimum value over (start_time, end_time]. */
    bool has_min;
    double min;
    /* max is the maximum value over (start_time, end_time]. */
    bool has_max;
    double max;
    /* ZeroThreshold may be optionally set to convey the width of the zero
 region, where the zero region is defined as the closed interval
 [-ZeroThreshold, ZeroThreshold].
 When ZeroThreshold is 0, the zero count bucket stores values that cannot be
 expressed using the standard exponential formula as well as values that
 have been rounded to zero. */
    double zero_threshold;
} opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint;
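
/* [Editor's illustrative sketch, not part of the generated header.]
 * With base = 2^(2^-scale), bucket `index` covers (base^index,
 * base^(index+1)]; e.g. scale = 0 gives base = 2 and power-of-two
 * buckets. Computing the lower boundary directly (needs <math.h>): */
static double example_exp_bucket_lower_bound(int32_t scale, int32_t index) {
    double base = pow(2.0, pow(2.0, -(double) scale)); /* 2^(2^-scale) */
    return pow(base, (double) index);
}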

/* SummaryDataPoint is a single data point in a timeseries that describes the
 time-varying values of a Summary metric. */
typedef struct _opentelemetry_proto_metrics_v1_SummaryDataPoint {
    /* StartTimeUnixNano is optional but strongly encouraged, see the
 detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t start_time_unix_nano;
    /* TimeUnixNano is required, see the detailed comments above Metric.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    /* count is the number of values in the population. Must be non-negative. */
    uint64_t count;
    /* sum of the values in the population. If count is zero then this field
 must be zero.

 Note: Sum should only be filled out when measuring non-negative discrete
 events, and is assumed to be monotonic over the values of these events.
 Negative events *can* be recorded, but sum should not be filled out when
 doing so. This is specifically to enforce compatibility w/ OpenMetrics,
 see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary */
    double sum;
    /* (Optional) list of values at different quantiles of the distribution calculated
 from the current snapshot. The quantiles must be strictly increasing. */
    pb_callback_t quantile_values;
    /* The set of key/value pairs that uniquely identify the timeseries from
 where this point belongs. The list may be empty (may contain 0 elements).
 Attribute keys MUST be unique (it is not allowed to have more than one
 attribute with the same key). */
    pb_callback_t attributes;
    /* Flags that apply to this specific data point. See DataPointFlags
 for the available flags and their meaning. */
    uint32_t flags;
} opentelemetry_proto_metrics_v1_SummaryDataPoint;

/* Represents the value at a given quantile of a distribution.

 To record Min and Max values, the following conventions are used:
 - The 1.0 quantile is equivalent to the maximum value observed.
 - The 0.0 quantile is equivalent to the minimum value observed.

 See the following issue for more context:
 https://github.com/open-telemetry/opentelemetry-proto/issues/125 */
typedef struct _opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile {
    /* The quantile of a distribution. Must be in the interval
 [0.0, 1.0]. */
    double quantile;
    /* The value at the given quantile of a distribution.

 Quantile values must NOT be negative. */
    double value;
} opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile;

/* A representation of an exemplar, which is a sample input measurement.
 Exemplars also hold information about the environment when the measurement
 was recorded, for example the span and trace ID of the active span when the
 exemplar was recorded. */
typedef struct _opentelemetry_proto_metrics_v1_Exemplar {
    /* time_unix_nano is the exact time when this exemplar was recorded.

 Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
 1970. */
    uint64_t time_unix_nano;
    pb_size_t which_value;
    union {
        double as_double;
        int64_t as_int;
    } value;
    /* (Optional) Span ID of the exemplar trace.
 span_id may be missing if the measurement is not recorded inside a trace
 or if the trace is not sampled. */
    pb_callback_t span_id;
    /* (Optional) Trace ID of the exemplar trace.
 trace_id may be missing if the measurement is not recorded inside a trace
 or if the trace is not sampled. */
    pb_callback_t trace_id;
    /* The set of key/value pairs that were filtered out by the aggregator, but
 recorded alongside the original measurement. Only key/value pairs that were
 filtered out by the aggregator should be included */
    pb_callback_t filtered_attributes;
} opentelemetry_proto_metrics_v1_Exemplar;

#ifdef __cplusplus
extern "C" {
#endif

/* Helper constants for enums */
#define _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
#define _opentelemetry_proto_metrics_v1_AggregationTemporality_MAX opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
#define _opentelemetry_proto_metrics_v1_AggregationTemporality_ARRAYSIZE ((opentelemetry_proto_metrics_v1_AggregationTemporality)(opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE+1))

#define _opentelemetry_proto_metrics_v1_DataPointFlags_MIN opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE
#define _opentelemetry_proto_metrics_v1_DataPointFlags_MAX opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
#define _opentelemetry_proto_metrics_v1_DataPointFlags_ARRAYSIZE ((opentelemetry_proto_metrics_v1_DataPointFlags)(opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK+1))

#define opentelemetry_proto_metrics_v1_Sum_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality

#define opentelemetry_proto_metrics_v1_Histogram_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality

#define opentelemetry_proto_metrics_v1_ExponentialHistogram_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality

/* Initializer values for message structs */
#define opentelemetry_proto_metrics_v1_MetricsData_init_default {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_ResourceMetrics_init_default {false, opentelemetry_proto_resource_v1_Resource_init_default, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_ScopeMetrics_init_default {false, opentelemetry_proto_common_v1_InstrumentationScope_init_default, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_Metric_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, {opentelemetry_proto_metrics_v1_Gauge_init_default}}
#define opentelemetry_proto_metrics_v1_Gauge_init_default {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_Sum_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN, 0}
#define opentelemetry_proto_metrics_v1_Histogram_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
#define opentelemetry_proto_metrics_v1_Summary_init_default {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_NumberDataPoint_init_default {0, 0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, 0}
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_init_default {0, 0, 0, false, 0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, false, 0, false, 0}
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_init_default {{{NULL}, NULL}, 0, 0, 0, false, 0, 0, 0, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default, 0, {{NULL}, NULL}, false, 0, false, 0, 0}
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default {0, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_init_default {0, 0, 0, 0, {{NULL}, NULL}, {{NULL}, NULL}, 0}
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_init_default {0, 0}
#define opentelemetry_proto_metrics_v1_Exemplar_init_default {0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_MetricsData_init_zero {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_ResourceMetrics_init_zero {false, opentelemetry_proto_resource_v1_Resource_init_zero, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_ScopeMetrics_init_zero {false, opentelemetry_proto_common_v1_InstrumentationScope_init_zero, {{NULL}, NULL}, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_Metric_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, {opentelemetry_proto_metrics_v1_Gauge_init_zero}}
#define opentelemetry_proto_metrics_v1_Gauge_init_zero {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_Sum_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN, 0}
#define opentelemetry_proto_metrics_v1_Histogram_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN}
#define opentelemetry_proto_metrics_v1_Summary_init_zero {{{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_NumberDataPoint_init_zero {0, 0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, 0}
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_init_zero {0, 0, 0, false, 0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, false, 0, false, 0}
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_init_zero {{{NULL}, NULL}, 0, 0, 0, false, 0, 0, 0, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero, 0, {{NULL}, NULL}, false, 0, false, 0, 0}
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero {0, {{NULL}, NULL}}
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_init_zero {0, 0, 0, 0, {{NULL}, NULL}, {{NULL}, NULL}, 0}
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_init_zero {0, 0}
#define opentelemetry_proto_metrics_v1_Exemplar_init_zero {0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}}
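
/* [Editor's illustrative sketch, not part of the generated file set.]
 * Encoding an empty MetricsData with the standard nanopb API; callback
 * fields left at {{NULL}, NULL} by _init_zero simply encode as absent.
 * Assumes pb_encode.h from the same nanopb include path used above and
 * the MetricsData msgdesc declared just below. */
static bool example_encode_metrics_data(uint8_t *buf, size_t buf_size,
                                        size_t *written) {
    opentelemetry_proto_metrics_v1_MetricsData msg =
        opentelemetry_proto_metrics_v1_MetricsData_init_zero;
    pb_ostream_t stream = pb_ostream_from_buffer(buf, buf_size);
    if (!pb_encode(&stream, &opentelemetry_proto_metrics_v1_MetricsData_msg, &msg))
        return false;
    *written = stream.bytes_written;
    return true;
}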

/* Field tags (for use in manual encoding/decoding) */
#define opentelemetry_proto_metrics_v1_MetricsData_resource_metrics_tag 1
#define opentelemetry_proto_metrics_v1_ResourceMetrics_resource_tag 1
#define opentelemetry_proto_metrics_v1_ResourceMetrics_scope_metrics_tag 2
#define opentelemetry_proto_metrics_v1_ResourceMetrics_schema_url_tag 3
#define opentelemetry_proto_metrics_v1_ScopeMetrics_scope_tag 1
#define opentelemetry_proto_metrics_v1_ScopeMetrics_metrics_tag 2
#define opentelemetry_proto_metrics_v1_ScopeMetrics_schema_url_tag 3
#define opentelemetry_proto_metrics_v1_Gauge_data_points_tag 1
#define opentelemetry_proto_metrics_v1_Sum_data_points_tag 1
#define opentelemetry_proto_metrics_v1_Sum_aggregation_temporality_tag 2
#define opentelemetry_proto_metrics_v1_Sum_is_monotonic_tag 3
#define opentelemetry_proto_metrics_v1_Histogram_data_points_tag 1
#define opentelemetry_proto_metrics_v1_Histogram_aggregation_temporality_tag 2
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_data_points_tag 1
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_aggregation_temporality_tag 2
#define opentelemetry_proto_metrics_v1_Summary_data_points_tag 1
#define opentelemetry_proto_metrics_v1_Metric_name_tag 1
#define opentelemetry_proto_metrics_v1_Metric_description_tag 2
#define opentelemetry_proto_metrics_v1_Metric_unit_tag 3
#define opentelemetry_proto_metrics_v1_Metric_gauge_tag 5
#define opentelemetry_proto_metrics_v1_Metric_sum_tag 7
#define opentelemetry_proto_metrics_v1_Metric_histogram_tag 9
#define opentelemetry_proto_metrics_v1_Metric_exponential_histogram_tag 10
#define opentelemetry_proto_metrics_v1_Metric_summary_tag 11
#define opentelemetry_proto_metrics_v1_NumberDataPoint_start_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_NumberDataPoint_time_unix_nano_tag 3
#define opentelemetry_proto_metrics_v1_NumberDataPoint_as_double_tag 4
#define opentelemetry_proto_metrics_v1_NumberDataPoint_as_int_tag 6
#define opentelemetry_proto_metrics_v1_NumberDataPoint_exemplars_tag 5
#define opentelemetry_proto_metrics_v1_NumberDataPoint_attributes_tag 7
#define opentelemetry_proto_metrics_v1_NumberDataPoint_flags_tag 8
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_start_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_time_unix_nano_tag 3
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_count_tag 4
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_sum_tag 5
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_bucket_counts_tag 6
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_explicit_bounds_tag 7
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_exemplars_tag 8
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_attributes_tag 9
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_flags_tag 10
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_min_tag 11
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_max_tag 12
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_offset_tag 1
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_bucket_counts_tag 2
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_attributes_tag 1
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_start_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_time_unix_nano_tag 3
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_count_tag 4
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_sum_tag 5
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_scale_tag 6
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_zero_count_tag 7
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_positive_tag 8
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_negative_tag 9
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_flags_tag 10
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_exemplars_tag 11
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_min_tag 12
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_max_tag 13
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_zero_threshold_tag 14
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_start_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_time_unix_nano_tag 3
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_count_tag 4
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_sum_tag 5
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_quantile_values_tag 6
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_attributes_tag 7
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_flags_tag 8
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_quantile_tag 1
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_value_tag 2
#define opentelemetry_proto_metrics_v1_Exemplar_time_unix_nano_tag 2
#define opentelemetry_proto_metrics_v1_Exemplar_as_double_tag 3
#define opentelemetry_proto_metrics_v1_Exemplar_as_int_tag 6
#define opentelemetry_proto_metrics_v1_Exemplar_span_id_tag 4
#define opentelemetry_proto_metrics_v1_Exemplar_trace_id_tag 5
#define opentelemetry_proto_metrics_v1_Exemplar_filtered_attributes_tag 7

/* Struct field encoding specification for nanopb */
#define opentelemetry_proto_metrics_v1_MetricsData_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, resource_metrics, 1)
#define opentelemetry_proto_metrics_v1_MetricsData_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_MetricsData_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_MetricsData_resource_metrics_MSGTYPE opentelemetry_proto_metrics_v1_ResourceMetrics

#define opentelemetry_proto_metrics_v1_ResourceMetrics_FIELDLIST(X, a) \
X(a, STATIC, OPTIONAL, MESSAGE, resource, 1) \
X(a, CALLBACK, REPEATED, MESSAGE, scope_metrics, 2) \
X(a, CALLBACK, SINGULAR, STRING, schema_url, 3)
#define opentelemetry_proto_metrics_v1_ResourceMetrics_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ResourceMetrics_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_ResourceMetrics_resource_MSGTYPE opentelemetry_proto_resource_v1_Resource
#define opentelemetry_proto_metrics_v1_ResourceMetrics_scope_metrics_MSGTYPE opentelemetry_proto_metrics_v1_ScopeMetrics

#define opentelemetry_proto_metrics_v1_ScopeMetrics_FIELDLIST(X, a) \
X(a, STATIC, OPTIONAL, MESSAGE, scope, 1) \
X(a, CALLBACK, REPEATED, MESSAGE, metrics, 2) \
X(a, CALLBACK, SINGULAR, STRING, schema_url, 3)
#define opentelemetry_proto_metrics_v1_ScopeMetrics_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ScopeMetrics_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_ScopeMetrics_scope_MSGTYPE opentelemetry_proto_common_v1_InstrumentationScope
#define opentelemetry_proto_metrics_v1_ScopeMetrics_metrics_MSGTYPE opentelemetry_proto_metrics_v1_Metric

#define opentelemetry_proto_metrics_v1_Metric_FIELDLIST(X, a) \
X(a, CALLBACK, SINGULAR, STRING, name, 1) \
X(a, CALLBACK, SINGULAR, STRING, description, 2) \
X(a, CALLBACK, SINGULAR, STRING, unit, 3) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,gauge,data.gauge), 5) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,sum,data.sum), 7) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,histogram,data.histogram), 9) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,exponential_histogram,data.exponential_histogram), 10) \
X(a, STATIC, ONEOF, MSG_W_CB, (data,summary,data.summary), 11)
#define opentelemetry_proto_metrics_v1_Metric_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Metric_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Metric_data_gauge_MSGTYPE opentelemetry_proto_metrics_v1_Gauge
#define opentelemetry_proto_metrics_v1_Metric_data_sum_MSGTYPE opentelemetry_proto_metrics_v1_Sum
#define opentelemetry_proto_metrics_v1_Metric_data_histogram_MSGTYPE opentelemetry_proto_metrics_v1_Histogram
#define opentelemetry_proto_metrics_v1_Metric_data_exponential_histogram_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogram
#define opentelemetry_proto_metrics_v1_Metric_data_summary_MSGTYPE opentelemetry_proto_metrics_v1_Summary

#define opentelemetry_proto_metrics_v1_Gauge_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1)
#define opentelemetry_proto_metrics_v1_Gauge_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Gauge_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Gauge_data_points_MSGTYPE opentelemetry_proto_metrics_v1_NumberDataPoint

#define opentelemetry_proto_metrics_v1_Sum_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \
X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2) \
X(a, STATIC, SINGULAR, BOOL, is_monotonic, 3)
#define opentelemetry_proto_metrics_v1_Sum_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Sum_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Sum_data_points_MSGTYPE opentelemetry_proto_metrics_v1_NumberDataPoint

#define opentelemetry_proto_metrics_v1_Histogram_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \
X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2)
#define opentelemetry_proto_metrics_v1_Histogram_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Histogram_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Histogram_data_points_MSGTYPE opentelemetry_proto_metrics_v1_HistogramDataPoint

#define opentelemetry_proto_metrics_v1_ExponentialHistogram_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \
X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2)
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_data_points_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint

#define opentelemetry_proto_metrics_v1_Summary_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1)
#define opentelemetry_proto_metrics_v1_Summary_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Summary_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Summary_data_points_MSGTYPE opentelemetry_proto_metrics_v1_SummaryDataPoint

#define opentelemetry_proto_metrics_v1_NumberDataPoint_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \
X(a, STATIC, ONEOF, DOUBLE, (value,as_double,value.as_double), 4) \
X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 5) \
X(a, STATIC, ONEOF, SFIXED64, (value,as_int,value.as_int), 6) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 7) \
X(a, STATIC, SINGULAR, UINT32, flags, 8)
#define opentelemetry_proto_metrics_v1_NumberDataPoint_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_NumberDataPoint_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_NumberDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar
#define opentelemetry_proto_metrics_v1_NumberDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

#define opentelemetry_proto_metrics_v1_HistogramDataPoint_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \
X(a, STATIC, SINGULAR, FIXED64, count, 4) \
X(a, STATIC, OPTIONAL, DOUBLE, sum, 5) \
X(a, CALLBACK, REPEATED, FIXED64, bucket_counts, 6) \
X(a, CALLBACK, REPEATED, DOUBLE, explicit_bounds, 7) \
X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 8) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 9) \
X(a, STATIC, SINGULAR, UINT32, flags, 10) \
X(a, STATIC, OPTIONAL, DOUBLE, min, 11) \
X(a, STATIC, OPTIONAL, DOUBLE, max, 12)
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 1) \
X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \
X(a, STATIC, SINGULAR, FIXED64, count, 4) \
X(a, STATIC, OPTIONAL, DOUBLE, sum, 5) \
X(a, STATIC, SINGULAR, SINT32, scale, 6) \
X(a, STATIC, SINGULAR, FIXED64, zero_count, 7) \
X(a, STATIC, OPTIONAL, MESSAGE, positive, 8) \
X(a, STATIC, OPTIONAL, MESSAGE, negative, 9) \
X(a, STATIC, SINGULAR, UINT32, flags, 10) \
X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 11) \
X(a, STATIC, OPTIONAL, DOUBLE, min, 12) \
X(a, STATIC, OPTIONAL, DOUBLE, max, 13) \
X(a, STATIC, SINGULAR, DOUBLE, zero_threshold, 14)
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_positive_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_negative_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar

#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, SINT32, offset, 1) \
X(a, CALLBACK, REPEATED, UINT64, bucket_counts, 2)
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_DEFAULT NULL

#define opentelemetry_proto_metrics_v1_SummaryDataPoint_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \
X(a, STATIC, SINGULAR, FIXED64, count, 4) \
X(a, STATIC, SINGULAR, DOUBLE, sum, 5) \
X(a, CALLBACK, REPEATED, MESSAGE, quantile_values, 6) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 7) \
X(a, STATIC, SINGULAR, UINT32, flags, 8)
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_quantile_values_MSGTYPE opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, DOUBLE, quantile, 1) \
X(a, STATIC, SINGULAR, DOUBLE, value, 2)
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_CALLBACK NULL
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_DEFAULT NULL

#define opentelemetry_proto_metrics_v1_Exemplar_FIELDLIST(X, a) \
X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 2) \
X(a, STATIC, ONEOF, DOUBLE, (value,as_double,value.as_double), 3) \
X(a, CALLBACK, SINGULAR, BYTES, span_id, 4) \
X(a, CALLBACK, SINGULAR, BYTES, trace_id, 5) \
X(a, STATIC, ONEOF, SFIXED64, (value,as_int,value.as_int), 6) \
X(a, CALLBACK, REPEATED, MESSAGE, filtered_attributes, 7)
#define opentelemetry_proto_metrics_v1_Exemplar_CALLBACK pb_default_field_callback
#define opentelemetry_proto_metrics_v1_Exemplar_DEFAULT NULL
#define opentelemetry_proto_metrics_v1_Exemplar_filtered_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_MetricsData_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ResourceMetrics_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ScopeMetrics_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Metric_msg;
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Gauge_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Sum_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Histogram_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogram_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Summary_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_NumberDataPoint_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_HistogramDataPoint_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_SummaryDataPoint_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_msg;
|
||||
extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Exemplar_msg;
|
||||
|
||||
/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
|
||||
#define opentelemetry_proto_metrics_v1_MetricsData_fields &opentelemetry_proto_metrics_v1_MetricsData_msg
|
||||
#define opentelemetry_proto_metrics_v1_ResourceMetrics_fields &opentelemetry_proto_metrics_v1_ResourceMetrics_msg
|
||||
#define opentelemetry_proto_metrics_v1_ScopeMetrics_fields &opentelemetry_proto_metrics_v1_ScopeMetrics_msg
|
||||
#define opentelemetry_proto_metrics_v1_Metric_fields &opentelemetry_proto_metrics_v1_Metric_msg
|
||||
#define opentelemetry_proto_metrics_v1_Gauge_fields &opentelemetry_proto_metrics_v1_Gauge_msg
|
||||
#define opentelemetry_proto_metrics_v1_Sum_fields &opentelemetry_proto_metrics_v1_Sum_msg
|
||||
#define opentelemetry_proto_metrics_v1_Histogram_fields &opentelemetry_proto_metrics_v1_Histogram_msg
|
||||
#define opentelemetry_proto_metrics_v1_ExponentialHistogram_fields &opentelemetry_proto_metrics_v1_ExponentialHistogram_msg
|
||||
#define opentelemetry_proto_metrics_v1_Summary_fields &opentelemetry_proto_metrics_v1_Summary_msg
|
||||
#define opentelemetry_proto_metrics_v1_NumberDataPoint_fields &opentelemetry_proto_metrics_v1_NumberDataPoint_msg
|
||||
#define opentelemetry_proto_metrics_v1_HistogramDataPoint_fields &opentelemetry_proto_metrics_v1_HistogramDataPoint_msg
|
||||
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_fields &opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_msg
|
||||
#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_fields &opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_msg
|
||||
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_fields &opentelemetry_proto_metrics_v1_SummaryDataPoint_msg
|
||||
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_fields &opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_msg
|
||||
#define opentelemetry_proto_metrics_v1_Exemplar_fields &opentelemetry_proto_metrics_v1_Exemplar_msg
|
||||
|
||||
/* Maximum encoded size of messages (where known) */
|
||||
/* opentelemetry_proto_metrics_v1_MetricsData_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_ResourceMetrics_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_ScopeMetrics_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_Metric_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_Gauge_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_Sum_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_Histogram_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_ExponentialHistogram_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_Summary_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_NumberDataPoint_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_HistogramDataPoint_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_SummaryDataPoint_size depends on runtime parameters */
|
||||
/* opentelemetry_proto_metrics_v1_Exemplar_size depends on runtime parameters */
|
||||
#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_size 18
|
||||
|
||||
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
@ -1,12 +0,0 @@
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.4.8-dev */

#include "opentelemetry/resource.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

PB_BIND(opentelemetry_proto_resource_v1_Resource, opentelemetry_proto_resource_v1_Resource, AUTO)

@ -1,58 +0,0 @@
/* Automatically generated nanopb header */
/* Generated by nanopb-0.4.8-dev */

#ifndef PB_OPENTELEMETRY_PROTO_RESOURCE_V1_OPENTELEMETRY_PROTO_RESOURCE_V1_RESOURCE_PB_H_INCLUDED
#define PB_OPENTELEMETRY_PROTO_RESOURCE_V1_OPENTELEMETRY_PROTO_RESOURCE_V1_RESOURCE_PB_H_INCLUDED
#include <nanopb/pb.h>
#include "opentelemetry/common.pb.h"

#if PB_PROTO_HEADER_VERSION != 40
#error Regenerate this file with the current version of nanopb generator.
#endif

/* Struct definitions */
/* Resource information. */
typedef struct _opentelemetry_proto_resource_v1_Resource {
    /* Set of attributes that describe the resource.
       Attribute keys MUST be unique (it is not allowed to have more than one
       attribute with the same key). */
    pb_callback_t attributes;
    /* dropped_attributes_count is the number of dropped attributes. If the value is 0, then
       no attributes were dropped. */
    uint32_t dropped_attributes_count;
} opentelemetry_proto_resource_v1_Resource;


#ifdef __cplusplus
extern "C" {
#endif

/* Initializer values for message structs */
#define opentelemetry_proto_resource_v1_Resource_init_default {{{NULL}, NULL}, 0}
#define opentelemetry_proto_resource_v1_Resource_init_zero {{{NULL}, NULL}, 0}

/* Field tags (for use in manual encoding/decoding) */
#define opentelemetry_proto_resource_v1_Resource_attributes_tag 1
#define opentelemetry_proto_resource_v1_Resource_dropped_attributes_count_tag 2

/* Struct field encoding specification for nanopb */
#define opentelemetry_proto_resource_v1_Resource_FIELDLIST(X, a) \
X(a, CALLBACK, REPEATED, MESSAGE, attributes, 1) \
X(a, STATIC, SINGULAR, UINT32, dropped_attributes_count, 2)
#define opentelemetry_proto_resource_v1_Resource_CALLBACK pb_default_field_callback
#define opentelemetry_proto_resource_v1_Resource_DEFAULT NULL
#define opentelemetry_proto_resource_v1_Resource_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue

extern const pb_msgdesc_t opentelemetry_proto_resource_v1_Resource_msg;

/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
#define opentelemetry_proto_resource_v1_Resource_fields &opentelemetry_proto_resource_v1_Resource_msg

/* Maximum encoded size of messages (where known) */
/* opentelemetry_proto_resource_v1_Resource_size depends on runtime parameters */

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif

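A note on the nanopb conventions in these deleted generated headers: CALLBACK-typed fields such as attributes are handled by a user-supplied function instead of static storage. A minimal decode sketch, with an assumed attribute-counting callback (illustrative only, not part of the commit; a real callback would pb_decode() each KeyValue submessage, nanopb skips whatever the callback leaves unread):

    #include <nanopb/pb_decode.h>
    #include "opentelemetry/resource.pb.h"

    /* Invoked once per encoded `attributes` entry; here it only counts them. */
    static bool count_attr(pb_istream_t *stream, const pb_field_t *field,
                           void **arg) {
            (void)stream;
            (void)field;
            ++*(size_t *)*arg;
            return true;
    }

    static size_t count_resource_attributes(const pb_byte_t *buf, size_t len) {
            opentelemetry_proto_resource_v1_Resource res =
                opentelemetry_proto_resource_v1_Resource_init_zero;
            size_t n = 0;

            res.attributes.funcs.decode = count_attr;
            res.attributes.arg          = &n;

            pb_istream_t is = pb_istream_from_buffer(buf, len);
            if (!pb_decode(&is, opentelemetry_proto_resource_v1_Resource_fields,
                           &res))
                    return 0; /* decode error */
            return n;
    }
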
@ -1,8 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023, Confluent Inc.
 * Copyright (c) 2012, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -425,10 +424,6 @@ static RD_INLINE RD_UNUSED int rd_refcnt_get(rd_refcnt_t *R) {
        } while (0)


#define RD_INTERFACE_CALL(i, name, ...) (i->name(i->opaque, __VA_ARGS__))

#define RD_CEIL_INTEGER_DIVISION(X, DEN) (((X) + ((DEN)-1)) / (DEN))

/**
 * @brief Utility types to hold memory,size tuple.
 */

@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 * Copyright (c) 2012, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 * Copyright (c) 2012, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -139,7 +139,7 @@ rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) {

#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \
        for ((sinx) = &(rsal)->rsal_addr[0]; \
             (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_cnt]; (sinx)++)
             (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_len]; (sinx)++)

/**
 * Wrapper for getaddrinfo(3) that performs these additional tasks:

@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2014-2022, Magnus Edenhill
 * Copyright (c) 2014-2016 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018-2022, Magnus Edenhill
 * Copyright (c) 2018 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 * Copyright (c) 2012-2016, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 * Copyright (c) 2012-2016, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,169 +0,0 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2023 Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "rdbase64.h"

#if WITH_SSL
#include <openssl/ssl.h>
#else

#define conv_bin2ascii(a, table) ((table)[(a)&0x3f])

static const unsigned char data_bin2ascii[65] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

static int base64_encoding_conversion(unsigned char *out,
                                      const unsigned char *in,
                                      int dlen) {
        int i, ret = 0;
        unsigned long l;

        for (i = dlen; i > 0; i -= 3) {
                if (i >= 3) {
                        l = (((unsigned long)in[0]) << 16L) |
                            (((unsigned long)in[1]) << 8L) | in[2];
                        *(out++) = conv_bin2ascii(l >> 18L, data_bin2ascii);
                        *(out++) = conv_bin2ascii(l >> 12L, data_bin2ascii);
                        *(out++) = conv_bin2ascii(l >> 6L, data_bin2ascii);
                        *(out++) = conv_bin2ascii(l, data_bin2ascii);
                } else {
                        l = ((unsigned long)in[0]) << 16L;
                        if (i == 2)
                                l |= ((unsigned long)in[1] << 8L);

                        *(out++) = conv_bin2ascii(l >> 18L, data_bin2ascii);
                        *(out++) = conv_bin2ascii(l >> 12L, data_bin2ascii);
                        *(out++) =
                            (i == 1) ? '='
                                     : conv_bin2ascii(l >> 6L, data_bin2ascii);
                        *(out++) = '=';
                }
                ret += 4;
                in += 3;
        }

        *out = '\0';
        return ret;
}

#endif

/**
 * @brief Base64 encode binary input \p in, and write the base64-encoded string
 *        and its size to \p out. out->ptr will be NULL in case of some issue
 *        with the conversion or the conversion is not supported.
 *
 * @remark out->ptr must be freed after use.
 */
void rd_base64_encode(const rd_chariov_t *in, rd_chariov_t *out) {

        size_t max_len;

        /* OpenSSL takes an |int| argument so the input cannot exceed that. */
        if (in->size > INT_MAX) {
                out->ptr = NULL;
                return;
        }

        max_len  = (((in->size + 2) / 3) * 4) + 1;
        out->ptr = rd_malloc(max_len);

#if WITH_SSL
        out->size = EVP_EncodeBlock((unsigned char *)out->ptr,
                                    (unsigned char *)in->ptr, (int)in->size);
#else
        out->size = base64_encoding_conversion(
            (unsigned char *)out->ptr, (unsigned char *)in->ptr, (int)in->size);
#endif

        rd_assert(out->size < max_len);
        out->ptr[out->size] = 0;
}


/**
 * @brief Base64 encode binary input \p in.
 * @returns a newly allocated, base64-encoded string or NULL in case of some
 *          issue with the conversion or the conversion is not supported.
 *
 * @remark Returned string must be freed after use.
 */
char *rd_base64_encode_str(const rd_chariov_t *in) {
        rd_chariov_t out;
        rd_base64_encode(in, &out);
        return out.ptr;
}


/**
 * @brief Base64 decode input string \p in. Ignores leading and trailing
 *        whitespace.
 * @returns * 0 on success, in which case a newly allocated binary string is
 *            set in \p out (and size).
 *          * -1 on invalid Base64.
 *          * -2 on conversion not supported.
 */
int rd_base64_decode(const rd_chariov_t *in, rd_chariov_t *out) {

#if WITH_SSL
        size_t ret_len;

        /* OpenSSL takes an |int| argument, so |in->size| must not exceed
         * that. */
        if (in->size % 4 != 0 || in->size > INT_MAX) {
                return -1;
        }

        ret_len  = ((in->size / 4) * 3);
        out->ptr = rd_malloc(ret_len + 1);

        if (EVP_DecodeBlock((unsigned char *)out->ptr, (unsigned char *)in->ptr,
                            (int)in->size) == -1) {
                rd_free(out->ptr);
                out->ptr = NULL;
                return -1;
        }

        /* EVP_DecodeBlock will pad the output with trailing NULs and count
         * them in the return value. */
        if (in->size > 1 && in->ptr[in->size - 1] == '=') {
                if (in->size > 2 && in->ptr[in->size - 2] == '=') {
                        ret_len -= 2;
                } else {
                        ret_len -= 1;
                }
        }

        out->ptr[ret_len] = 0;
        out->size         = ret_len;

        return 0;
#else
        return -2;
#endif
}

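For orientation, the helpers above take and return rd_chariov_t pointer/size pairs. A minimal round-trip sketch, assuming librdkafka's private headers are available (rd_free() pairs with the rd_malloc() used internally; not part of this commit):

    #include <string.h>
    #include "rdbase64.h"

    static int base64_roundtrip_example(void) {
            rd_chariov_t in  = {"hello", 5};
            rd_chariov_t enc = {NULL, 0}, dec = {NULL, 0};
            int ok;

            rd_base64_encode(&in, &enc); /* enc.ptr = "aGVsbG8=", enc.size = 8 */
            if (!enc.ptr)
                    return -1; /* input too large or conversion unsupported */

            if (rd_base64_decode(&enc, &dec) != 0) { /* 0 = ok, -1/-2 = error */
                    rd_free(enc.ptr);
                    return -1;
            }

            ok = dec.size == in.size && !memcmp(dec.ptr, in.ptr, in.size);
            rd_free(enc.ptr);
            rd_free(dec.ptr);
            return ok ? 0 : -1;
    }
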
@ -1,41 +0,0 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2023 Confluent Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _RDBASE64_H_
#define _RDBASE64_H_

#include "rd.h"

void rd_base64_encode(const rd_chariov_t *in, rd_chariov_t *out);

char *rd_base64_encode_str(const rd_chariov_t *in);

int rd_base64_decode(const rd_chariov_t *in, rd_chariov_t *out);

#endif /* _RDBASE64_H_ */

@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2017-2022, Magnus Edenhill
 * Copyright (c) 2017 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -660,16 +660,13 @@ size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size) {
                               segremains);

                seg->seg_of -= toerase;
                seg->seg_erased += toerase;
                rbuf->rbuf_len -= toerase;

                of += toerase;

                /* If segment is now empty, remove it */
                if (seg->seg_of == 0) {
                        rbuf->rbuf_erased -= seg->seg_erased;
                if (seg->seg_of == 0)
                        rd_buf_destroy_segment(rbuf, seg);
                }
        }

        /* Update absolute offset of remaining segments */

@ -712,7 +709,6 @@ int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof) {
             next != seg;) {
                rd_segment_t *this = next;
                next = TAILQ_PREV(this, rd_segment_head, seg_link);
                rbuf->rbuf_erased -= this->seg_erased;
                rd_buf_destroy_segment(rbuf, this);
        }


@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2017-2022, Magnus Edenhill
 * Copyright (c) 2017 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -70,8 +70,6 @@ typedef struct rd_segment_s {
                                 * beginning in the grand rd_buf_t */
        void (*seg_free)(void *p); /**< Optional free function for seg_p */
        int seg_flags;             /**< Segment flags */
        size_t seg_erased;         /** Total number of bytes erased from
                                    *  this segment. */
#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */
#define RD_SEGMENT_F_FREE \
        0x2 /**< Free segment on destroy, \

@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018-2022, Magnus Edenhill
 * Copyright (c) 2018 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018-2022, Magnus Edenhill
 * Copyright (c) 2018 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2017-2022, Magnus Edenhill
 * Copyright (c) 2017 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2017-2022, Magnus Edenhill
 * Copyright (c) 2017 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 * Copyright (c) 2012-2015 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 * Copyright (c) 2012-2018, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 * Copyright (c) 2012-2020, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2020-2022, Magnus Edenhill
 * Copyright (c) 2020 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 * Copyright (c) 2012, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librd - Rapid Development C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 * Copyright (c) 2012, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -31,7 +31,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018-2022, Magnus Edenhill
 * Copyright (c) 2018, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018-2022, Magnus Edenhill
 * Copyright (c) 2018, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2021-2022, Magnus Edenhill
 * Copyright (c) 2021 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,7 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2021-2022, Magnus Edenhill
 * Copyright (c) 2021 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -1,8 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * Copyright (c) 2018 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -31,7 +30,6 @@
#define _RDINTERVAL_H_

#include "rd.h"
#include "rdrand.h"

typedef struct rd_interval_s {
        rd_ts_t ri_ts_last; /* last interval timestamp */

@ -111,22 +109,6 @@ static RD_INLINE RD_UNUSED void rd_interval_reset_to_now(rd_interval_t *ri,
        ri->ri_backoff = 0;
}

/**
 * Reset the interval to 'now' with the given backoff ms and max_jitter as
 * percentage. The backoff is given just for absolute jitter calculation. If now
 * is 0, the time will be gathered automatically.
 */
static RD_INLINE RD_UNUSED void
rd_interval_reset_to_now_with_jitter(rd_interval_t *ri,
                                     rd_ts_t now,
                                     int64_t backoff_ms,
                                     int max_jitter) {
        rd_interval_reset_to_now(ri, now);
        /* We are multiplying by 10 as (backoff_ms * percent * 1000)/100 ->
         * backoff_ms * jitter * 10 */
        ri->ri_backoff = backoff_ms * rd_jitter(-max_jitter, max_jitter) * 10;
}

/**
 * Back off the next interval by `backoff_us` microseconds.
 */

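A note on the deleted jitter helper above: the multiply-by-10 is pure unit conversion, since backoff_ms * (percent / 100) expressed in microseconds is backoff_ms * percent * 1000 / 100 = backoff_ms * percent * 10. A self-contained sketch of the same arithmetic, with a stand-in for rd_jitter() (assumed to return a uniform integer in [lo, hi]):

    #include <stdint.h>
    #include <stdlib.h>

    /* Stand-in for librdkafka's rd_jitter(): uniform integer in [lo, hi]. */
    static int jitter(int lo, int hi) {
            return lo + rand() % (hi - lo + 1);
    }

    /* Signed jitter backoff in microseconds, mirroring the deleted helper:
     * backoff_ms * percent * 10 == backoff_ms * (percent / 100) * 1000 us. */
    static int64_t jitter_backoff_us(int64_t backoff_ms, int max_jitter_pct) {
            return backoff_ms * jitter(-max_jitter_pct, max_jitter_pct) * 10;
    }

    /* e.g. backoff_ms = 1000, max_jitter_pct = 20:
     * result ranges over [-200000, +200000] us, i.e. +/-20% of 1 second. */
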
@ -1,8 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023, Confluent Inc.
 * Copyright (c) 2012-2013, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@ -46,7 +45,6 @@
#include "rdkafka_topic.h"
#include "rdkafka_partition.h"
#include "rdkafka_offset.h"
#include "rdkafka_telemetry.h"
#include "rdkafka_transport.h"
#include "rdkafka_cgrp.h"
#include "rdkafka_assignor.h"

@ -65,7 +63,6 @@
#endif

#include "rdtime.h"
#include "rdmap.h"
#include "crc32c.h"
#include "rdunittest.h"

@ -396,6 +393,14 @@ void rd_kafka_set_log_level(rd_kafka_t *rk, int level) {



static const char *rd_kafka_type2str(rd_kafka_type_t type) {
        static const char *types[] = {
            [RD_KAFKA_PRODUCER] = "producer",
            [RD_KAFKA_CONSUMER] = "consumer",
        };
        return types[type];
}

#define _ERR_DESC(ENUM, DESC) \
        [ENUM - RD_KAFKA_RESP_ERR__BEGIN] = {ENUM, &(#ENUM)[18] /*pfx*/, DESC}

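The &(#ENUM)[18] expression above stringifies the enum identifier and then indexes 18 characters in, the length of the shared "RD_KAFKA_RESP_ERR_" prefix, so each table entry gets a short name without a second string literal. A standalone sketch of the same preprocessor trick (hypothetical demo names, not from the commit):

    #include <stdio.h>

    #define PREFIX_LEN 18 /* strlen("RD_KAFKA_RESP_ERR_") */
    #define SHORT_NAME(ENUM) (&(#ENUM)[PREFIX_LEN])

    int main(void) {
            /* Prints "_BAD_MSG": the stringified token minus its prefix.
             * Works even if the enum is undeclared, since # only stringifies. */
            printf("%s\n", SHORT_NAME(RD_KAFKA_RESP_ERR__BAD_MSG));
            return 0;
    }
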
@ -484,11 +489,6 @@ static const struct rd_kafka_err_desc rd_kafka_err_descs[] = {
    _ERR_DESC(RD_KAFKA_RESP_ERR__NOOP, "Local: No operation performed"),
    _ERR_DESC(RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET,
              "Local: No offset to automatically reset to"),
    _ERR_DESC(RD_KAFKA_RESP_ERR__LOG_TRUNCATION,
              "Local: Partition log truncation detected"),
    _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD,
              "Local: an invalid record in the same batch caused "
              "the failure of this message too."),

    _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN, "Unknown broker error"),
    _ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR, "Success"),

@ -697,23 +697,7 @@ static const struct rd_kafka_err_desc rd_kafka_err_descs[] = {
    _ERR_DESC(RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE,
              "Broker: Request principal deserialization failed during "
              "forwarding"),
    _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID, "Broker: Unknown topic id"),
    _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH,
              "Broker: The member epoch is fenced by the group coordinator"),
    _ERR_DESC(RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID,
              "Broker: The instance ID is still used by another member in the "
              "consumer group"),
    _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR,
              "Broker: The assignor or its version range is not supported by "
              "the consumer group"),
    _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH,
              "Broker: The member epoch is stale"),
    _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID,
              "Broker: Client sent a push telemetry request with an invalid or "
              "outdated subscription ID"),
    _ERR_DESC(RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE,
              "Broker: Client sent a push telemetry request larger than the "
              "maximum size the broker will accept"),

    _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL)};


@ -940,8 +924,6 @@ void rd_kafka_destroy_final(rd_kafka_t *rk) {
        rd_kafka_wrlock(rk);
        rd_kafka_wrunlock(rk);

        rd_kafka_telemetry_clear(rk, rd_true /*clear_control_flow_fields*/);

        /* Terminate SASL provider */
        if (rk->rk_conf.sasl.provider)
                rd_kafka_sasl_term(rk);

@ -964,18 +946,6 @@ void rd_kafka_destroy_final(rd_kafka_t *rk) {
                rd_kafka_assignment_destroy(rk);
                if (rk->rk_consumer.q)
                        rd_kafka_q_destroy(rk->rk_consumer.q);
                rd_avg_destroy(
                    &rk->rk_telemetry.rd_avg_current.rk_avg_poll_idle_ratio);
                rd_avg_destroy(
                    &rk->rk_telemetry.rd_avg_current.rk_avg_rebalance_latency);
                rd_avg_destroy(
                    &rk->rk_telemetry.rd_avg_current.rk_avg_commit_latency);
                rd_avg_destroy(
                    &rk->rk_telemetry.rd_avg_rollover.rk_avg_poll_idle_ratio);
                rd_avg_destroy(
                    &rk->rk_telemetry.rd_avg_rollover.rk_avg_rebalance_latency);
                rd_avg_destroy(
                    &rk->rk_telemetry.rd_avg_rollover.rk_avg_commit_latency);
        }

        /* Purge op-queues */

@ -1018,7 +988,7 @@ void rd_kafka_destroy_final(rd_kafka_t *rk) {
        mtx_destroy(&rk->rk_init_lock);

        if (rk->rk_full_metadata)
                rd_kafka_metadata_destroy(&rk->rk_full_metadata->metadata);
                rd_kafka_metadata_destroy(rk->rk_full_metadata);
        rd_kafkap_str_destroy(rk->rk_client_id);
        rd_kafkap_str_destroy(rk->rk_group_id);
        rd_kafkap_str_destroy(rk->rk_eos.transactional_id);

@ -1104,13 +1074,7 @@ static void rd_kafka_destroy_app(rd_kafka_t *rk, int flags) {
                rd_kafka_consumer_close(rk);
        }

        /* Await telemetry termination. This method blocks until the last
         * PushTelemetry request is sent (if possible). */
        if (!(flags & RD_KAFKA_DESTROY_F_IMMEDIATE))
                rd_kafka_telemetry_await_termination(rk);

        /* With the consumer and telemetry closed, terminate the rest of
         * librdkafka. */
        /* With the consumer closed, terminate the rest of librdkafka. */
        rd_atomic32_set(&rk->rk_terminate,
                        flags | RD_KAFKA_DESTROY_F_TERMINATE);

@ -1455,14 +1419,13 @@ static RD_INLINE void rd_kafka_stats_emit_toppar(struct _stats_emit *st,
         * offsets are not (yet) committed.
         */
        if (end_offset != RD_KAFKA_OFFSET_INVALID) {
                if (rktp->rktp_stored_pos.offset >= 0 &&
                    rktp->rktp_stored_pos.offset <= end_offset)
                if (rktp->rktp_stored_offset >= 0 &&
                    rktp->rktp_stored_offset <= end_offset)
                        consumer_lag_stored =
                            end_offset - rktp->rktp_stored_pos.offset;
                if (rktp->rktp_committed_pos.offset >= 0 &&
                    rktp->rktp_committed_pos.offset <= end_offset)
                        consumer_lag =
                            end_offset - rktp->rktp_committed_pos.offset;
                            end_offset - rktp->rktp_stored_offset;
                if (rktp->rktp_committed_offset >= 0 &&
                    rktp->rktp_committed_offset <= end_offset)
                        consumer_lag = end_offset - rktp->rktp_committed_offset;
        }

        _st_printf(

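The lag bookkeeping above reduces to end_offset minus the stored or committed offset, guarded so unknown or out-of-range offsets report no lag. The essential logic in isolation (simplified names, not librdkafka API):

    #include <stdint.h>

    #define OFFSET_INVALID -1001 /* mirrors RD_KAFKA_OFFSET_INVALID */

    /* Returns committed consumer lag, or -1 while offsets are unknown. */
    static int64_t consumer_lag(int64_t end_offset, int64_t committed_offset) {
            if (end_offset == OFFSET_INVALID)
                    return -1; /* no end offset known yet */
            if (committed_offset < 0 || committed_offset > end_offset)
                    return -1; /* nothing committed (yet) */
            return end_offset - committed_offset;
    }
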
@ -1494,14 +1457,10 @@ static RD_INLINE void rd_kafka_stats_emit_toppar(struct _stats_emit *st,
            ", "
            "\"stored_offset\":%" PRId64
            ", "
            "\"stored_leader_epoch\":%" PRId32
            ", "
            "\"commited_offset\":%" PRId64
            ", " /*FIXME: issue #80 */
            "\"committed_offset\":%" PRId64
            ", "
            "\"committed_leader_epoch\":%" PRId32
            ", "
            "\"eof_offset\":%" PRId64
            ", "
            "\"lo_offset\":%" PRId64

@ -1514,8 +1473,6 @@ static RD_INLINE void rd_kafka_stats_emit_toppar(struct _stats_emit *st,
            ", "
            "\"consumer_lag_stored\":%" PRId64
            ", "
            "\"leader_epoch\":%" PRId32
            ", "
            "\"txmsgs\":%" PRIu64
            ", "
            "\"txbytes\":%" PRIu64

@ -1545,15 +1502,12 @@ static RD_INLINE void rd_kafka_stats_emit_toppar(struct _stats_emit *st,
            0, (size_t)0, rd_kafka_q_len(rktp->rktp_fetchq),
            rd_kafka_q_size(rktp->rktp_fetchq),
            rd_kafka_fetch_states[rktp->rktp_fetch_state],
            rktp->rktp_query_pos.offset, offs.fetch_pos.offset,
            rktp->rktp_app_pos.offset, rktp->rktp_stored_pos.offset,
            rktp->rktp_stored_pos.leader_epoch,
            rktp->rktp_committed_pos.offset, /* FIXME: issue #80 */
            rktp->rktp_committed_pos.offset,
            rktp->rktp_committed_pos.leader_epoch, offs.eof_offset,
            rktp->rktp_lo_offset, rktp->rktp_hi_offset, rktp->rktp_ls_offset,
            consumer_lag, consumer_lag_stored, rktp->rktp_leader_epoch,
            rd_atomic64_get(&rktp->rktp_c.tx_msgs),
            rktp->rktp_query_offset, offs.fetch_offset, rktp->rktp_app_offset,
            rktp->rktp_stored_offset,
            rktp->rktp_committed_offset, /* FIXME: issue #80 */
            rktp->rktp_committed_offset, offs.eof_offset, rktp->rktp_lo_offset,
            rktp->rktp_hi_offset, rktp->rktp_ls_offset, consumer_lag,
            consumer_lag_stored, rd_atomic64_get(&rktp->rktp_c.tx_msgs),
            rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes),
            rd_atomic64_get(&rktp->rktp_c.rx_msgs),
            rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes),

@ -1615,6 +1569,8 @@ static void rd_kafka_stats_emit_broker_reqs(struct _stats_emit *st,
        [RD_KAFKAP_AlterReplicaLogDirs] = rd_true,
        [RD_KAFKAP_DescribeLogDirs] = rd_true,

        [RD_KAFKAP_SaslAuthenticate] = rd_false,

        [RD_KAFKAP_CreateDelegationToken] = rd_true,
        [RD_KAFKAP_RenewDelegationToken] = rd_true,
        [RD_KAFKAP_ExpireDelegationToken] = rd_true,

@ -1631,36 +1587,21 @@ static void rd_kafka_stats_emit_broker_reqs(struct _stats_emit *st,
        [RD_KAFKAP_AlterIsr] = rd_true,
        [RD_KAFKAP_UpdateFeatures] = rd_true,
        [RD_KAFKAP_Envelope] = rd_true,
        [RD_KAFKAP_FetchSnapshot] = rd_true,
        [RD_KAFKAP_BrokerHeartbeat] = rd_true,
        [RD_KAFKAP_UnregisterBroker] = rd_true,
        [RD_KAFKAP_AllocateProducerIds] = rd_true,
        [RD_KAFKAP_ConsumerGroupHeartbeat] = rd_true,
    },
    [3 /*hide-unless-non-zero*/] = {
        /* Hide Admin requests unless they've been used */
        [RD_KAFKAP_CreateTopics] = rd_true,
        [RD_KAFKAP_DeleteTopics] = rd_true,
        [RD_KAFKAP_DeleteRecords] = rd_true,
        [RD_KAFKAP_CreatePartitions] = rd_true,
        [RD_KAFKAP_DescribeAcls] = rd_true,
        [RD_KAFKAP_CreateAcls] = rd_true,
        [RD_KAFKAP_DeleteAcls] = rd_true,
        [RD_KAFKAP_DescribeConfigs] = rd_true,
        [RD_KAFKAP_AlterConfigs] = rd_true,
        [RD_KAFKAP_DeleteGroups] = rd_true,
        [RD_KAFKAP_ListGroups] = rd_true,
        [RD_KAFKAP_DescribeGroups] = rd_true,
        [RD_KAFKAP_DescribeLogDirs] = rd_true,
        [RD_KAFKAP_IncrementalAlterConfigs] = rd_true,
        [RD_KAFKAP_AlterPartitionReassignments] = rd_true,
        [RD_KAFKAP_ListPartitionReassignments] = rd_true,
        [RD_KAFKAP_OffsetDelete] = rd_true,
        [RD_KAFKAP_DescribeClientQuotas] = rd_true,
        [RD_KAFKAP_AlterClientQuotas] = rd_true,
        [RD_KAFKAP_DescribeUserScramCredentials] = rd_true,
        [RD_KAFKAP_AlterUserScramCredentials] = rd_true,
    }};
        [RD_KAFKAP_CreateTopics] = rd_true,
        [RD_KAFKAP_DeleteTopics] = rd_true,
        [RD_KAFKAP_DeleteRecords] = rd_true,
        [RD_KAFKAP_CreatePartitions] = rd_true,
        [RD_KAFKAP_DescribeAcls] = rd_true,
        [RD_KAFKAP_CreateAcls] = rd_true,
        [RD_KAFKAP_DeleteAcls] = rd_true,
        [RD_KAFKAP_DescribeConfigs] = rd_true,
        [RD_KAFKAP_AlterConfigs] = rd_true,
        [RD_KAFKAP_DeleteGroups] = rd_true,
        [RD_KAFKAP_ListGroups] = rd_true,
        [RD_KAFKAP_DescribeGroups] = rd_true}};
        int i;
        int cnt = 0;

@ -2149,10 +2090,7 @@ static int rd_kafka_thread_main(void *arg) {
                       RD_KAFKA_CGRP_STATE_TERM)))) {
                rd_ts_t sleeptime = rd_kafka_timers_next(
                    &rk->rk_timers, 1000 * 1000 /*1s*/, 1 /*lock*/);
                /* Use ceiling division to avoid calling serve with a 0 ms
                 * timeout in a tight loop until 1 ms has passed. */
                int timeout_ms = (sleeptime + 999) / 1000;
                rd_kafka_q_serve(rk->rk_ops, timeout_ms, 0,
                rd_kafka_q_serve(rk->rk_ops, (int)(sleeptime / 1000), 0,
                                 RD_KAFKA_Q_CB_CALLBACK, NULL, NULL);
                if (rk->rk_cgrp) /* FIXME: move to timer-triggered */
                        rd_kafka_cgrp_serve(rk->rk_cgrp);

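The reverted hunk above trades the ceiling conversion back for truncating division. The difference only matters for sub-millisecond wakeups, where truncation yields a 0 ms timeout and a busy loop until a full millisecond has elapsed. The arithmetic in isolation:

    /* Truncating: 500 us -> 0 ms (busy spin); ceiling: 500 us -> 1 ms. */
    static int us_to_ms_ceil(long long sleeptime_us) {
            return (int)((sleeptime_us + 999) / 1000);
    }
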
@ -2205,7 +2143,6 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
|
|||
rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;
|
||||
int ret_errno = 0;
|
||||
const char *conf_err;
|
||||
char *group_remote_assignor_override = NULL;
|
||||
#ifndef _WIN32
|
||||
sigset_t newset, oldset;
|
||||
#endif
|
||||
|
|
@ -2285,9 +2222,6 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
|
|||
rd_interval_init(&rk->rk_suppress.sparse_connect_random);
|
||||
mtx_init(&rk->rk_suppress.sparse_connect_lock, mtx_plain);
|
||||
|
||||
mtx_init(&rk->rk_telemetry.lock, mtx_plain);
|
||||
cnd_init(&rk->rk_telemetry.termination_cnd);
|
||||
|
||||
rd_atomic64_init(&rk->rk_ts_last_poll, rk->rk_ts_created);
|
||||
rd_atomic32_init(&rk->rk_flushing, 0);
|
||||
|
||||
|
|
@ -2400,64 +2334,6 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
if (!rk->rk_conf.group_remote_assignor) {
|
||||
rd_kafka_assignor_t *cooperative_assignor;
|
||||
|
||||
/* Detect if chosen assignor is cooperative
|
||||
* FIXME: remove this compatibility altogether
|
||||
* and apply the breaking changes that will be required
|
||||
* in next major version. */
|
||||
|
||||
cooperative_assignor =
|
||||
rd_kafka_assignor_find(rk, "cooperative-sticky");
|
||||
rk->rk_conf.partition_assignors_cooperative =
|
||||
!rk->rk_conf.partition_assignors.rl_cnt ||
|
||||
(cooperative_assignor &&
|
||||
cooperative_assignor->rkas_enabled);
|
||||
|
||||
if (rk->rk_conf.group_protocol ==
|
||||
RD_KAFKA_GROUP_PROTOCOL_CONSUMER) {
|
||||
/* Default remote assignor to the chosen local one. */
|
||||
if (rk->rk_conf.partition_assignors_cooperative) {
|
||||
group_remote_assignor_override =
|
||||
rd_strdup("uniform");
|
||||
rk->rk_conf.group_remote_assignor =
|
||||
group_remote_assignor_override;
|
||||
} else {
|
||||
rd_kafka_assignor_t *range_assignor =
|
||||
rd_kafka_assignor_find(rk, "range");
|
||||
if (range_assignor &&
|
||||
range_assignor->rkas_enabled) {
|
||||
rd_kafka_log(
|
||||
rk, LOG_WARNING, "ASSIGNOR",
|
||||
"\"range\" assignor is sticky "
|
||||
"with group protocol CONSUMER");
|
||||
group_remote_assignor_override =
|
||||
rd_strdup("range");
|
||||
rk->rk_conf.group_remote_assignor =
|
||||
group_remote_assignor_override;
|
||||
} else {
|
||||
rd_kafka_log(
|
||||
rk, LOG_WARNING, "ASSIGNOR",
|
||||
"roundrobin assignor isn't "
|
||||
"available "
|
||||
"with group protocol CONSUMER, "
|
||||
"using the \"uniform\" one. "
|
||||
"It's similar, "
|
||||
"but it's also sticky");
|
||||
group_remote_assignor_override =
|
||||
rd_strdup("uniform");
|
||||
rk->rk_conf.group_remote_assignor =
|
||||
group_remote_assignor_override;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* When users starts setting properties of the new protocol,
|
||||
* they can only use incremental_assign/unassign. */
|
||||
rk->rk_conf.partition_assignors_cooperative = rd_true;
|
||||
}
|
||||
|
||||
/* Create Mock cluster */
|
||||
rd_atomic32_init(&rk->rk_mock.cluster_cnt, 0);
|
||||
if (rk->rk_conf.mock.broker_cnt > 0) {
|
||||
|
|
@ -2536,9 +2412,8 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
|
|||
|
||||
if (RD_KAFKAP_STR_LEN(rk->rk_group_id) > 0) {
|
||||
/* Create consumer group handle */
|
||||
rk->rk_cgrp = rd_kafka_cgrp_new(
|
||||
rk, rk->rk_conf.group_protocol, rk->rk_group_id,
|
||||
rk->rk_client_id);
|
||||
rk->rk_cgrp = rd_kafka_cgrp_new(rk, rk->rk_group_id,
|
||||
rk->rk_client_id);
|
||||
rk->rk_consumer.q =
|
||||
rd_kafka_q_keep(rk->rk_cgrp->rkcg_q);
|
||||
} else {
|
||||
|
|
@ -2546,29 +2421,6 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
|
|||
rk->rk_consumer.q = rd_kafka_q_keep(rk->rk_rep);
|
||||
}
|
||||
|
||||
rd_avg_init(
|
||||
&rk->rk_telemetry.rd_avg_rollover.rk_avg_poll_idle_ratio,
|
||||
RD_AVG_GAUGE, 0, 1, 2, rk->rk_conf.enable_metrics_push);
|
||||
rd_avg_init(
|
||||
&rk->rk_telemetry.rd_avg_current.rk_avg_poll_idle_ratio,
|
||||
RD_AVG_GAUGE, 0, 1, 2, rk->rk_conf.enable_metrics_push);
|
||||
rd_avg_init(
|
||||
&rk->rk_telemetry.rd_avg_rollover.rk_avg_rebalance_latency,
|
||||
RD_AVG_GAUGE, 0, 500 * 1000, 2,
|
||||
rk->rk_conf.enable_metrics_push);
|
||||
rd_avg_init(
|
||||
&rk->rk_telemetry.rd_avg_current.rk_avg_rebalance_latency,
|
||||
RD_AVG_GAUGE, 0, 900000 * 1000, 2,
|
||||
rk->rk_conf.enable_metrics_push);
|
||||
rd_avg_init(
|
||||
&rk->rk_telemetry.rd_avg_rollover.rk_avg_commit_latency,
|
||||
RD_AVG_GAUGE, 0, 500 * 1000, 2,
|
||||
rk->rk_conf.enable_metrics_push);
|
||||
rd_avg_init(
|
||||
&rk->rk_telemetry.rd_avg_current.rk_avg_commit_latency,
|
||||
RD_AVG_GAUGE, 0, 500 * 1000, 2,
|
||||
rk->rk_conf.enable_metrics_push);
|
||||
|
||||
} else if (type == RD_KAFKA_PRODUCER) {
|
||||
rk->rk_eos.transactional_id =
|
||||
rd_kafkap_str_new(rk->rk_conf.eos.transactional_id, -1);
|
||||
|
|
@ -2647,8 +2499,7 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
|
|||
|
||||
/* Add initial list of brokers from configuration */
|
||||
if (rk->rk_conf.brokerlist) {
|
||||
if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist,
|
||||
rd_true) == 0)
|
||||
if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist) == 0)
|
||||
rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
|
||||
"No brokers configured");
|
||||
}
|
||||
|
|
@ -2750,8 +2601,6 @@ fail:
|
|||
* that belong to rk_conf and thus needs to be cleaned up.
|
||||
* Legacy APIs, sigh.. */
|
||||
if (app_conf) {
|
||||
if (group_remote_assignor_override)
|
||||
rd_free(group_remote_assignor_override);
|
||||
rd_kafka_assignors_term(rk);
|
||||
rd_kafka_interceptors_destroy(&rk->rk_conf);
|
||||
memset(&rk->rk_conf, 0, sizeof(rk->rk_conf));
|
||||
|
|
@ -2852,8 +2701,7 @@ static RD_UNUSED int rd_kafka_consume_start0(rd_kafka_topic_t *rkt,
|
|||
return -1;
|
||||
}
|
||||
|
||||
rd_kafka_toppar_op_fetch_start(rktp, RD_KAFKA_FETCH_POS(offset, -1),
|
||||
rkq, RD_KAFKA_NO_REPLYQ);
|
||||
rd_kafka_toppar_op_fetch_start(rktp, offset, rkq, RD_KAFKA_NO_REPLYQ);
|
||||
|
||||
rd_kafka_toppar_destroy(rktp);
|
||||
|
||||
|
|
@ -2965,8 +2813,7 @@ rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *app_rkt,
|
|||
replyq = RD_KAFKA_REPLYQ(tmpq, 0);
|
||||
}
|
||||
|
||||
if ((err = rd_kafka_toppar_op_seek(rktp, RD_KAFKA_FETCH_POS(offset, -1),
|
||||
replyq))) {
|
||||
if ((err = rd_kafka_toppar_op_seek(rktp, offset, replyq))) {
|
||||
if (tmpq)
|
||||
rd_kafka_q_destroy_owner(tmpq);
|
||||
rd_kafka_toppar_destroy(rktp);
|
||||
|
|
@ -3018,9 +2865,8 @@ rd_kafka_seek_partitions(rd_kafka_t *rk,
|
|||
continue;
|
||||
}
|
||||
|
||||
err = rd_kafka_toppar_op_seek(
|
||||
rktp, rd_kafka_topic_partition_get_fetch_pos(rktpar),
|
||||
RD_KAFKA_REPLYQ(tmpq, 0));
|
||||
err = rd_kafka_toppar_op_seek(rktp, rktpar->offset,
|
||||
RD_KAFKA_REPLYQ(tmpq, 0));
|
||||
if (err) {
|
||||
rktpar->err = err;
|
||||
} else {
|
||||
|
|
@ -3038,8 +2884,7 @@ rd_kafka_seek_partitions(rd_kafka_t *rk,
|
|||
while (cnt > 0) {
|
||||
rd_kafka_op_t *rko;
|
||||
|
||||
rko =
|
||||
rd_kafka_q_pop(tmpq, rd_timeout_remains_us(abs_timeout), 0);
|
||||
rko = rd_kafka_q_pop(tmpq, rd_timeout_remains(abs_timeout), 0);
|
||||
if (!rko) {
|
||||
rd_kafka_q_destroy_owner(tmpq);
|
||||
|
||||
|
|
@ -3178,7 +3023,8 @@ static rd_kafka_op_res_t rd_kafka_consume_callback0(
|
|||
struct consume_ctx ctx = {.consume_cb = consume_cb, .opaque = opaque};
|
||||
rd_kafka_op_res_t res;
|
||||
|
||||
rd_kafka_app_poll_start(rkq->rkq_rk, 0, timeout_ms);
|
||||
if (timeout_ms)
|
||||
rd_kafka_app_poll_blocking(rkq->rkq_rk);
|
||||
|
||||
res = rd_kafka_q_serve(rkq, timeout_ms, max_cnt, RD_KAFKA_Q_CB_RETURN,
|
||||
rd_kafka_consume_cb, &ctx);
|
||||
|
|
@ -3246,15 +3092,16 @@ static rd_kafka_message_t *
|
|||
rd_kafka_consume0(rd_kafka_t *rk, rd_kafka_q_t *rkq, int timeout_ms) {
|
||||
rd_kafka_op_t *rko;
|
||||
rd_kafka_message_t *rkmessage = NULL;
|
||||
rd_ts_t now = rd_clock();
|
||||
rd_ts_t abs_timeout = rd_timeout_init0(now, timeout_ms);
|
||||
rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
|
||||
|
||||
rd_kafka_app_poll_start(rk, now, timeout_ms);
|
||||
if (timeout_ms)
|
||||
rd_kafka_app_poll_blocking(rk);
|
||||
|
||||
rd_kafka_yield_thread = 0;
|
||||
while ((
|
||||
rko = rd_kafka_q_pop(rkq, rd_timeout_remains_us(abs_timeout), 0))) {
|
||||
rd_kafka_op_res_t res;
|
||||
|
||||
res =
|
||||
rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL);
|
||||
|
||||
|
|
@ -3572,12 +3419,10 @@ rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions) {
|
|||
}
|
||||
|
||||
rd_kafka_toppar_lock(rktp);
|
||||
rd_kafka_topic_partition_set_from_fetch_pos(rktpar,
|
||||
rktp->rktp_app_pos);
|
||||
rktpar->offset = rktp->rktp_app_offset;
|
||||
rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
|
||||
rd_kafka_toppar_unlock(rktp);
|
||||
rd_kafka_toppar_destroy(rktp);
|
||||
|
||||
rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
|
||||
}
|
||||
|
||||
return RD_KAFKA_RESP_ERR_NO_ERROR;
|
||||
|
|
@ -3604,7 +3449,6 @@ static void rd_kafka_query_wmark_offsets_resp_cb(rd_kafka_t *rk,
|
|||
struct _query_wmark_offsets_state *state;
|
||||
rd_kafka_topic_partition_list_t *offsets;
|
||||
rd_kafka_topic_partition_t *rktpar;
|
||||
int actions = 0;
|
||||
|
||||
if (err == RD_KAFKA_RESP_ERR__DESTROY) {
|
||||
/* 'state' has gone out of scope when query_watermark..()
|
||||
|
|
@ -3616,15 +3460,7 @@ static void rd_kafka_query_wmark_offsets_resp_cb(rd_kafka_t *rk,
|
|||
|
||||
offsets = rd_kafka_topic_partition_list_new(1);
|
||||
err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets,
|
||||
&actions);
|
||||
|
||||
if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
|
||||
/* Remove its cache in case the topic isn't a known topic. */
|
||||
rd_kafka_wrlock(rk);
|
||||
rd_kafka_metadata_cache_delete_by_name(rk, state->topic);
|
||||
rd_kafka_wrunlock(rk);
|
||||
}
|
||||
|
||||
NULL);
|
||||
if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
|
||||
rd_kafka_topic_partition_list_destroy(offsets);
|
||||
return; /* Retrying */
|
||||
|
|
@ -3645,18 +3481,14 @@ static void rd_kafka_query_wmark_offsets_resp_cb(rd_kafka_t *rk,
|
|||
/* FALLTHRU */
|
||||
}
|
||||
|
||||
rktpar = rd_kafka_topic_partition_list_find(offsets, state->topic,
|
||||
state->partition);
|
||||
if (!rktpar && err > RD_KAFKA_RESP_ERR__END) {
|
||||
/* Partition not seen in response,
|
||||
* not a local error. */
|
||||
/* Partition not seen in response. */
|
||||
if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, state->topic,
|
||||
state->partition)))
|
||||
err = RD_KAFKA_RESP_ERR__BAD_MSG;
|
||||
} else if (rktpar) {
|
||||
if (rktpar->err)
|
||||
err = rktpar->err;
|
||||
else
|
||||
state->offsets[state->offidx] = rktpar->offset;
|
||||
}
|
||||
else if (rktpar->err)
|
||||
err = rktpar->err;
|
||||
else
|
||||
state->offsets[state->offidx] = rktpar->offset;
|
||||
|
||||
state->offidx++;
|
||||
|
||||
|
|
@ -3712,25 +3544,26 @@ rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk,
|
|||
state.ts_end = ts_end;
|
||||
state.state_version = rd_kafka_brokers_get_state_version(rk);
|
||||
|
||||
|
||||
rktpar->offset = RD_KAFKA_OFFSET_BEGINNING;
|
||||
rd_kafka_ListOffsetsRequest(
|
||||
leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0),
|
||||
rd_kafka_query_wmark_offsets_resp_cb, timeout_ms, &state);
|
||||
rd_kafka_query_wmark_offsets_resp_cb, &state);
|
||||
|
||||
rktpar->offset = RD_KAFKA_OFFSET_END;
|
||||
rd_kafka_ListOffsetsRequest(
|
||||
leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0),
|
||||
rd_kafka_query_wmark_offsets_resp_cb, timeout_ms, &state);
|
||||
rd_kafka_query_wmark_offsets_resp_cb, &state);
|
||||
|
||||
rd_kafka_topic_partition_list_destroy(partitions);
|
||||
rd_list_destroy(&leaders);
|
||||
|
||||
/* Wait for reply (or timeout) */
|
||||
while (state.err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
|
||||
rd_kafka_q_serve(rkq, RD_POLL_INFINITE, 0,
|
||||
RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb,
|
||||
NULL);
|
||||
}
|
||||
while (state.err == RD_KAFKA_RESP_ERR__IN_PROGRESS &&
|
||||
rd_kafka_q_serve(rkq, 100, 0, RD_KAFKA_Q_CB_CALLBACK,
|
||||
rd_kafka_poll_cb,
|
||||
NULL) != RD_KAFKA_OP_RES_YIELD)
|
||||
;
|
||||
|
||||
rd_kafka_q_destroy_owner(rkq);
|
||||
|
||||
|
|
@ -3870,7 +3703,7 @@ rd_kafka_offsets_for_times(rd_kafka_t *rk,
|
|||
state.wait_reply++;
|
||||
rd_kafka_ListOffsetsRequest(
|
||||
leader->rkb, leader->partitions, RD_KAFKA_REPLYQ(rkq, 0),
|
||||
rd_kafka_get_offsets_for_times_resp_cb, timeout_ms, &state);
|
||||
rd_kafka_get_offsets_for_times_resp_cb, &state);
|
||||
}
|
||||
|
||||
rd_list_destroy(&leaders);
|
||||
|
|
@ -3927,8 +3760,7 @@ rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
|
|||
cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
|
||||
return RD_KAFKA_OP_RES_PASS; /* Dont handle here */
|
||||
else {
|
||||
rk->rk_ts_last_poll_end = rd_clock();
|
||||
struct consume_ctx ctx = {.consume_cb =
|
||||
struct consume_ctx ctx = {.consume_cb =
|
||||
rk->rk_conf.consume_cb,
|
||||
.opaque = rk->rk_conf.opaque};
|
||||
|
||||
|
|
@ -4087,7 +3919,6 @@ rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
|
|||
case RD_KAFKA_OP_DELETETOPICS:
|
||||
case RD_KAFKA_OP_CREATEPARTITIONS:
|
||||
case RD_KAFKA_OP_ALTERCONFIGS:
|
||||
case RD_KAFKA_OP_INCREMENTALALTERCONFIGS:
|
||||
case RD_KAFKA_OP_DESCRIBECONFIGS:
|
||||
case RD_KAFKA_OP_DELETERECORDS:
|
||||
case RD_KAFKA_OP_DELETEGROUPS:
|
||||
|
|
@ -4095,7 +3926,6 @@ rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
|
|||
case RD_KAFKA_OP_CREATEACLS:
|
||||
case RD_KAFKA_OP_DESCRIBEACLS:
|
||||
case RD_KAFKA_OP_DELETEACLS:
|
||||
case RD_KAFKA_OP_LISTOFFSETS:
|
||||
/* Calls op_destroy() from worker callback,
|
||||
* when the time comes. */
|
||||
res = rd_kafka_op_call(rk, rkq, rko);
|
||||
|
|
@ -4122,19 +3952,6 @@ rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
|
|||
rd_kafka_purge(rk, rko->rko_u.purge.flags);
|
||||
break;
|
||||
|
||||
case RD_KAFKA_OP_SET_TELEMETRY_BROKER:
|
||||
rd_kafka_set_telemetry_broker_maybe(
|
||||
rk, rko->rko_u.telemetry_broker.rkb);
|
||||
break;
|
||||
|
||||
case RD_KAFKA_OP_TERMINATE_TELEMETRY:
|
||||
rd_kafka_telemetry_schedule_termination(rko->rko_rk);
|
||||
break;
|
||||
|
||||
case RD_KAFKA_OP_METADATA_UPDATE:
|
||||
res = rd_kafka_metadata_update_op(rk, rko->rko_u.metadata.mdi);
|
||||
break;
|
||||
|
||||
default:
|
||||
/* If op has a callback set (e.g., OAUTHBEARER_REFRESH),
|
||||
* call it. */
|
||||
|
|
@@ -4157,8 +3974,14 @@ rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
 int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms) {
         int r;

+        if (timeout_ms)
+                rd_kafka_app_poll_blocking(rk);
+
         r = rd_kafka_q_serve(rk->rk_rep, timeout_ms, 0, RD_KAFKA_Q_CB_CALLBACK,
                              rd_kafka_poll_cb, NULL);

+        rd_kafka_app_polled(rk);
+
         return r;
 }

@@ -4166,9 +3989,13 @@ int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms) {
 rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms) {
         rd_kafka_op_t *rko;

+        if (timeout_ms)
+                rd_kafka_app_poll_blocking(rkqu->rkqu_rk);
+
         rko = rd_kafka_q_pop_serve(rkqu->rkqu_q, rd_timeout_us(timeout_ms), 0,
                                    RD_KAFKA_Q_CB_EVENT, rd_kafka_poll_cb, NULL);

+        rd_kafka_app_polled(rkqu->rkqu_rk);
+
         if (!rko)
                 return NULL;

@@ -4179,8 +4006,14 @@ rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms) {
 int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms) {
         int r;

+        if (timeout_ms)
+                rd_kafka_app_poll_blocking(rkqu->rkqu_rk);
+
         r = rd_kafka_q_serve(rkqu->rkqu_q, timeout_ms, 0,
                              RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL);

+        rd_kafka_app_polled(rkqu->rkqu_rk);
+
         return r;
 }
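The three hunks above restore the 2.0.2 shape of the public poll entry points, where a blocking poll is bracketed by rd_kafka_app_poll_blocking() and rd_kafka_app_polled() so the client can police max.poll.interval.ms. A minimal sketch of the application-side loop these functions serve; the broker address is a placeholder and error handling is trimmed:

#include <librdkafka/rdkafka.h>

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        int i;

        /* "localhost:9092" is a placeholder broker address. */
        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                return 1;

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                return 1;

        /* Each rd_kafka_poll() call serves queued callbacks (delivery
         * reports, errors, statistics) and counts as an application poll,
         * which is what the rd_kafka_app_polled() call above records. */
        for (i = 0; i < 10; i++)
                rd_kafka_poll(rk, 100 /* block up to 100 ms */);

        rd_kafka_flush(rk, 5000);
        rd_kafka_destroy(rk);
        return 0;
}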
@@ -4737,26 +4570,6 @@ rd_kafka_consumer_group_state_code(const char *name) {
         return RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN;
 }

-static const char *rd_kafka_consumer_group_type_names[] = {
-    "Unknown", "Consumer", "Classic"};
-
-const char *
-rd_kafka_consumer_group_type_name(rd_kafka_consumer_group_type_t type) {
-        if (type < 0 || type >= RD_KAFKA_CONSUMER_GROUP_TYPE__CNT)
-                return NULL;
-        return rd_kafka_consumer_group_type_names[type];
-}
-
-rd_kafka_consumer_group_type_t
-rd_kafka_consumer_group_type_code(const char *name) {
-        size_t i;
-        for (i = 0; i < RD_KAFKA_CONSUMER_GROUP_TYPE__CNT; i++) {
-                if (!rd_strcasecmp(rd_kafka_consumer_group_type_names[i], name))
-                        return i;
-        }
-        return RD_KAFKA_CONSUMER_GROUP_TYPE_UNKNOWN;
-}
-
 static void rd_kafka_DescribeGroups_resp_cb(rd_kafka_t *rk,
                                             rd_kafka_broker_t *rkb,
                                             rd_kafka_resp_err_t err,
@@ -4838,8 +4651,8 @@ static void rd_kafka_DescribeGroups_resp_cb(rd_kafka_t *rk,
                         rd_kafka_buf_read_str(reply, &MemberId);
                         rd_kafka_buf_read_str(reply, &ClientId);
                         rd_kafka_buf_read_str(reply, &ClientHost);
-                        rd_kafka_buf_read_kbytes(reply, &Meta);
-                        rd_kafka_buf_read_kbytes(reply, &Assignment);
+                        rd_kafka_buf_read_bytes(reply, &Meta);
+                        rd_kafka_buf_read_bytes(reply, &Assignment);

                         mi->member_id = RD_KAFKAP_STR_DUP(&MemberId);
                         mi->client_id = RD_KAFKAP_STR_DUP(&ClientId);

@@ -4941,9 +4754,7 @@ static void rd_kafka_ListGroups_resp_cb(rd_kafka_t *rk,

                 state->wait_cnt++;
                 error = rd_kafka_DescribeGroupsRequest(
-                    rkb, 0, grps, i,
-                    rd_false /* don't include authorized operations */,
-                    RD_KAFKA_REPLYQ(state->q, 0),
+                    rkb, 0, grps, i, RD_KAFKA_REPLYQ(state->q, 0),
                     rd_kafka_DescribeGroups_resp_cb, state);
                 if (error) {
                         rd_kafka_DescribeGroups_resp_cb(

@@ -5022,7 +4833,7 @@ rd_kafka_list_groups(rd_kafka_t *rk,
                 state.wait_cnt++;
                 rkb_cnt++;
                 error = rd_kafka_ListGroupsRequest(
-                    rkb, 0, NULL, 0, NULL, 0, RD_KAFKA_REPLYQ(state.q, 0),
+                    rkb, 0, NULL, 0, RD_KAFKA_REPLYQ(state.q, 0),
                     rd_kafka_ListGroups_resp_cb, &state);
                 if (error) {
                         rd_kafka_ListGroups_resp_cb(rk, rkb,
@@ -5198,154 +5009,3 @@ int rd_kafka_errno(void) {
 int rd_kafka_unittest(void) {
         return rd_unittest();
 }
-
-
-/**
- * Creates a new UUID.
- *
- * @return A newly allocated UUID.
- */
-rd_kafka_Uuid_t *rd_kafka_Uuid_new(int64_t most_significant_bits,
-                                   int64_t least_significant_bits) {
-        rd_kafka_Uuid_t *uuid = rd_calloc(1, sizeof(rd_kafka_Uuid_t));
-        uuid->most_significant_bits  = most_significant_bits;
-        uuid->least_significant_bits = least_significant_bits;
-        return uuid;
-}
-
-/**
- * Returns a newly allocated copy of the given UUID.
- *
- * @param uuid UUID to copy.
- * @return Copy of the provided UUID.
- *
- * @remark Dynamically allocated. Deallocate (free) after use.
- */
-rd_kafka_Uuid_t *rd_kafka_Uuid_copy(const rd_kafka_Uuid_t *uuid) {
-        rd_kafka_Uuid_t *copy_uuid = rd_kafka_Uuid_new(
-            uuid->most_significant_bits, uuid->least_significant_bits);
-        if (*uuid->base64str)
-                memcpy(copy_uuid->base64str, uuid->base64str, 23);
-        return copy_uuid;
-}
-
-/**
- * Returns a new non cryptographically secure UUIDv4 (random).
- *
- * @return A UUIDv4.
- *
- * @remark Must be freed after use using rd_kafka_Uuid_destroy().
- */
-rd_kafka_Uuid_t rd_kafka_Uuid_random() {
-        int i;
-        unsigned char rand_values_bytes[16] = {0};
-        uint64_t *rand_values_uint64 = (uint64_t *)rand_values_bytes;
-        unsigned char *rand_values_app;
-        rd_kafka_Uuid_t ret = RD_KAFKA_UUID_ZERO;
-        for (i = 0; i < 16; i += 2) {
-                uint16_t rand_uint16 = (uint16_t)rd_jitter(0, INT16_MAX - 1);
-                /* No need to convert endianess here because it's still only
-                 * a random value. */
-                rand_values_app = (unsigned char *)&rand_uint16;
-                rand_values_bytes[i] |= rand_values_app[0];
-                rand_values_bytes[i + 1] |= rand_values_app[1];
-        }
-
-        rand_values_bytes[6] &= 0x0f; /* clear version */
-        rand_values_bytes[6] |= 0x40; /* version 4 */
-        rand_values_bytes[8] &= 0x3f; /* clear variant */
-        rand_values_bytes[8] |= 0x80; /* IETF variant */
-
-        ret.most_significant_bits  = be64toh(rand_values_uint64[0]);
-        ret.least_significant_bits = be64toh(rand_values_uint64[1]);
-        return ret;
-}
-
-/**
- * @brief Destroy the provided uuid.
- *
- * @param uuid UUID
- */
-void rd_kafka_Uuid_destroy(rd_kafka_Uuid_t *uuid) {
-        rd_free(uuid);
-}
-
-/**
- * @brief Computes canonical encoding for the given uuid string.
- *        Mainly useful for testing.
- *
- * @param uuid UUID for which canonical encoding is required.
- *
- * @return canonical encoded string for the given UUID.
- *
- * @remark Must be freed after use.
- */
-const char *rd_kafka_Uuid_str(const rd_kafka_Uuid_t *uuid) {
-        int i, j;
-        unsigned char bytes[16];
-        char *ret = rd_calloc(37, sizeof(*ret));
-
-        for (i = 0; i < 8; i++) {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-                j = 7 - i;
-#elif __BYTE_ORDER == __BIG_ENDIAN
-                j = i;
-#endif
-                bytes[i]     = (uuid->most_significant_bits >> (8 * j)) & 0xFF;
-                bytes[8 + i] = (uuid->least_significant_bits >> (8 * j)) & 0xFF;
-        }
-
-        rd_snprintf(ret, 37,
-                    "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%"
-                    "02x%02x%02x",
-                    bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
-                    bytes[6], bytes[7], bytes[8], bytes[9], bytes[10],
-                    bytes[11], bytes[12], bytes[13], bytes[14], bytes[15]);
-        return ret;
-}
-
-const char *rd_kafka_Uuid_base64str(const rd_kafka_Uuid_t *uuid) {
-        if (*uuid->base64str)
-                return uuid->base64str;
-
-        rd_chariov_t in_base64;
-        char *out_base64_str;
-        char *uuid_bytes;
-        uint64_t input_uuid[2];
-
-        input_uuid[0]  = htobe64(uuid->most_significant_bits);
-        input_uuid[1]  = htobe64(uuid->least_significant_bits);
-        uuid_bytes     = (char *)input_uuid;
-        in_base64.ptr  = uuid_bytes;
-        in_base64.size = sizeof(uuid->most_significant_bits) +
-                         sizeof(uuid->least_significant_bits);
-
-        out_base64_str = rd_base64_encode_str(&in_base64);
-        if (!out_base64_str)
-                return NULL;
-
-        rd_strlcpy((char *)uuid->base64str, out_base64_str,
-                   23 /* Removing extra ('=') padding */);
-        rd_free(out_base64_str);
-        return uuid->base64str;
-}
-
-unsigned int rd_kafka_Uuid_hash(const rd_kafka_Uuid_t *uuid) {
-        unsigned char bytes[16];
-        memcpy(bytes, &uuid->most_significant_bits, 8);
-        memcpy(&bytes[8], &uuid->least_significant_bits, 8);
-        return rd_bytes_hash(bytes, 16);
-}
-
-unsigned int rd_kafka_Uuid_map_hash(const void *key) {
-        return rd_kafka_Uuid_hash(key);
-}
-
-int64_t rd_kafka_Uuid_least_significant_bits(const rd_kafka_Uuid_t *uuid) {
-        return uuid->least_significant_bits;
-}
-
-
-int64_t rd_kafka_Uuid_most_significant_bits(const rd_kafka_Uuid_t *uuid) {
-        return uuid->most_significant_bits;
-}
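The removed rd_kafka_Uuid_random() above builds a non-cryptographic UUIDv4 by stamping the RFC 4122 version and variant bits onto 16 random bytes. A self-contained sketch of just that bit manipulation, with stdlib rand() standing in for librdkafka's internal rd_jitter():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stamp version-4/IETF-variant bits onto 16 random bytes, mirroring
 * the bit operations in the removed rd_kafka_Uuid_random(). */
static void make_uuid_v4(unsigned char bytes[16]) {
        int i;
        for (i = 0; i < 16; i++)
                bytes[i] = (unsigned char)(rand() & 0xff);

        bytes[6] &= 0x0f; /* clear version nibble */
        bytes[6] |= 0x40; /* version 4 */
        bytes[8] &= 0x3f; /* clear variant bits */
        bytes[8] |= 0x80; /* IETF variant */
}

int main(void) {
        unsigned char b[16];
        int i;
        make_uuid_v4(b);
        /* Same 8-4-4-4-12 hex grouping that rd_kafka_Uuid_str() emits. */
        for (i = 0; i < 16; i++) {
                printf("%02x", b[i]);
                if (i == 3 || i == 5 || i == 7 || i == 9)
                        putchar('-');
        }
        putchar('\n');
        return 0;
}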
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,8 +1,7 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2018-2022, Magnus Edenhill
- *               2023, Confluent Inc.
+ * Copyright (c) 2018 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -32,18 +31,10 @@


 #include "rdstring.h"
-#include "rdmap.h"
 #include "rdkafka_error.h"
 #include "rdkafka_confval.h"
-#if WITH_SSL
-typedef struct rd_kafka_broker_s rd_kafka_broker_t;
-extern int rd_kafka_ssl_hmac(rd_kafka_broker_t *rkb,
-                             const EVP_MD *evp,
-                             const rd_chariov_t *in,
-                             const rd_chariov_t *salt,
-                             int itcnt,
-                             rd_chariov_t *out);
-#endif


 /**
  * @brief Common AdminOptions type used for all admin APIs.

@@ -78,9 +69,15 @@ struct rd_kafka_AdminOptions_s {
                                            *     CreateTopics
                                            *     CreatePartitions
                                            *     AlterConfigs
-                                           *     IncrementalAlterConfigs
                                            */

+        rd_kafka_confval_t incremental; /**< BOOL: Incremental rather than
+                                         *         absolute application
+                                         *         of config.
+                                         *   Valid for:
+                                         *     AlterConfigs
+                                         */
+
         rd_kafka_confval_t broker; /**< INT: Explicitly override
                                     *        broker id to send
                                     *        requests to.

@@ -94,14 +91,6 @@ struct rd_kafka_AdminOptions_s {
                                    *   Valid for:
                                    *     ListConsumerGroupOffsets
                                    */
-        rd_kafka_confval_t
-            include_authorized_operations; /**< BOOL: Whether broker should
-                                            *   return authorized operations.
-                                            *   Valid for:
-                                            *     DescribeConsumerGroups
-                                            *     DescribeCluster
-                                            *     DescribeTopics
-                                            */

         rd_kafka_confval_t
             match_consumer_group_states; /**< PTR: list of consumer group states

@@ -109,19 +98,6 @@ struct rd_kafka_AdminOptions_s {
                                           *   Valid for: ListConsumerGroups.
                                           */

-        rd_kafka_confval_t
-            match_consumer_group_types; /**< PTR: list of consumer group types
-                                         *   to query for.
-                                         *   Valid for: ListConsumerGroups.
-                                         */
-
-        rd_kafka_confval_t
-            isolation_level; /**< INT:Isolation Level needed for list Offset
-                              *   to query for.
-                              *   Default Set to
-                              *   RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED
-                              */
-
         rd_kafka_confval_t opaque; /**< PTR: Application opaque.
                                     *   Valid for all. */
 };
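These confvals back the public rd_kafka_AdminOptions_* setters; the revert only narrows which confvals exist and which admin operations they are valid for. A short, illustrative sketch of populating the options shown above for a CreateTopics request (rk is an assumed existing client handle; return-code checks are trimmed):

#include <librdkafka/rdkafka.h>

/* Illustrative only: configure AdminOptions for a CreateTopics call.
 * The validate_only and opaque setters map to the confvals above. */
static rd_kafka_AdminOptions_t *make_options(rd_kafka_t *rk,
                                             void *app_opaque) {
        char errstr[256];
        rd_kafka_AdminOptions_t *options =
            rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

        /* BOOL confval: only validate on the broker, don't create. */
        rd_kafka_AdminOptions_set_validate_only(options, 1,
                                                errstr, sizeof(errstr));

        /* INT confval: overall request timeout in milliseconds. */
        rd_kafka_AdminOptions_set_request_timeout(options, 10 * 1000,
                                                  errstr, sizeof(errstr));

        /* PTR confval: application opaque returned with the result event. */
        rd_kafka_AdminOptions_set_opaque(options, app_opaque);

        return options;
}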
@@ -212,6 +188,13 @@ struct rd_kafka_NewPartitions_s {
  * @{
  */

+/* KIP-248 */
+typedef enum rd_kafka_AlterOperation_t {
+        RD_KAFKA_ALTER_OP_ADD    = 0,
+        RD_KAFKA_ALTER_OP_SET    = 1,
+        RD_KAFKA_ALTER_OP_DELETE = 2,
+} rd_kafka_AlterOperation_t;
+
 struct rd_kafka_ConfigEntry_s {
         rd_strtup_t *kv; /**< Name/Value pair */

@@ -219,9 +202,8 @@ struct rd_kafka_ConfigEntry_s {

         /* Attributes: this is a struct for easy copying */
         struct {
-                /** Operation type, used for IncrementalAlterConfigs */
-                rd_kafka_AlterConfigOpType_t op_type;
-                rd_kafka_ConfigSource_t source; /**< Config source */
+                rd_kafka_AlterOperation_t operation; /**< Operation */
+                rd_kafka_ConfigSource_t source;      /**< Config source */
                 rd_bool_t is_readonly;  /**< Value is read-only (on broker) */
                 rd_bool_t is_default;   /**< Value is at its default */
                 rd_bool_t is_sensitive; /**< Value is sensitive */

@@ -268,10 +250,6 @@ struct rd_kafka_AlterConfigs_result_s {
         rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */
 };

-struct rd_kafka_IncrementalAlterConfigs_result_s {
-        rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */
-};
-
 struct rd_kafka_ConfigResource_result_s {
         rd_list_t resources; /**< Type (struct rd_kafka_ConfigResource *):
                               *   List of config resources, sans config
@@ -320,47 +298,6 @@ struct rd_kafka_DeleteRecords_s {

 /**@}*/

-/**
- * @name ListConsumerGroupOffsets
- * @{
- */
-
-/**
- * @brief ListConsumerGroupOffsets result
- */
-struct rd_kafka_ListConsumerGroupOffsets_result_s {
-        rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
-};
-
-struct rd_kafka_ListConsumerGroupOffsets_s {
-        char *group_id; /**< Points to data */
-        rd_kafka_topic_partition_list_t *partitions;
-        char data[1]; /**< The group id is allocated along with
-                       *   the struct here. */
-};
-
-/**@}*/
-
-/**
- * @name AlterConsumerGroupOffsets
- * @{
- */
-
-/**
- * @brief AlterConsumerGroupOffsets result
- */
-struct rd_kafka_AlterConsumerGroupOffsets_result_s {
-        rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
-};
-
-struct rd_kafka_AlterConsumerGroupOffsets_s {
-        char *group_id; /**< Points to data */
-        rd_kafka_topic_partition_list_t *partitions;
-        char data[1]; /**< The group id is allocated along with
-                       *   the struct here. */
-};
-
-/**@}*/
-
 /**
  * @name DeleteConsumerGroupOffsets

@@ -383,24 +320,6 @@ struct rd_kafka_DeleteConsumerGroupOffsets_s {

 /**@}*/

-/**
- * @name ListOffsets
- * @{
- */
-
-/**
- * @struct ListOffsets result about a single partition
- */
-struct rd_kafka_ListOffsetsResultInfo_s {
-        rd_kafka_topic_partition_t *topic_partition;
-        int64_t timestamp;
-};
-
-rd_kafka_ListOffsetsResultInfo_t *
-rd_kafka_ListOffsetsResultInfo_new(rd_kafka_topic_partition_t *rktpar,
-                                   rd_ts_t timestamp);
-/**@}*/
-
 /**
  * @name CreateAcls
  * @{
@@ -438,6 +357,50 @@ struct rd_kafka_DeleteAcls_result_response_s {

 /**@}*/


+/**
+ * @name AlterConsumerGroupOffsets
+ * @{
+ */
+
+/**
+ * @brief AlterConsumerGroupOffsets result
+ */
+struct rd_kafka_AlterConsumerGroupOffsets_result_s {
+        rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
+};
+
+struct rd_kafka_AlterConsumerGroupOffsets_s {
+        char *group_id; /**< Points to data */
+        rd_kafka_topic_partition_list_t *partitions;
+        char data[1]; /**< The group id is allocated along with
+                       *   the struct here. */
+};
+
+/**@}*/
+
+
+/**
+ * @name ListConsumerGroupOffsets
+ * @{
+ */
+
+/**
+ * @brief ListConsumerGroupOffsets result
+ */
+struct rd_kafka_ListConsumerGroupOffsets_result_s {
+        rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
+};
+
+struct rd_kafka_ListConsumerGroupOffsets_s {
+        char *group_id; /**< Points to data */
+        rd_kafka_topic_partition_list_t *partitions;
+        char data[1]; /**< The group id is allocated along with
+                       *   the struct here. */
+};
+
+/**@}*/
+
 /**
  * @name ListConsumerGroups
  * @{

@@ -451,7 +414,6 @@ struct rd_kafka_ConsumerGroupListing_s {
         /** Is it a simple consumer group? That means empty protocol_type. */
         rd_bool_t is_simple_consumer_group;
         rd_kafka_consumer_group_state_t state; /**< Consumer group state. */
-        rd_kafka_consumer_group_type_t type;   /**< Consumer group type. */
 };

@@ -511,109 +473,10 @@ struct rd_kafka_ConsumerGroupDescription_s {
         rd_kafka_consumer_group_state_t state;
         /** Consumer group coordinator. */
         rd_kafka_Node_t *coordinator;
-        /** Count of operations allowed for topic. -1 indicates operations not
-         * requested.*/
-        int authorized_operations_cnt;
-        /** Operations allowed for topic. May be NULL if operations were not
-         * requested */
-        rd_kafka_AclOperation_t *authorized_operations;
         /** Group specific error. */
         rd_kafka_error_t *error;
 };

 /**@}*/

-/**
- * @name DescribeTopics
- * @{
- */
-
-/**
- * @brief TopicCollection contains a list of topics.
- *
- */
-struct rd_kafka_TopicCollection_s {
-        char **topics;     /**< List of topic names. */
-        size_t topics_cnt; /**< Count of topic names. */
-};
-
-/**
- * @brief TopicPartition result type in DescribeTopics result.
- *
- */
-struct rd_kafka_TopicPartitionInfo_s {
-        int partition;              /**< Partition id. */
-        rd_kafka_Node_t *leader;    /**< Leader of the partition. */
-        size_t isr_cnt;             /**< Count of insync replicas. */
-        rd_kafka_Node_t **isr;      /**< List of in sync replica nodes. */
-        size_t replica_cnt;         /**< Count of partition replicas. */
-        rd_kafka_Node_t **replicas; /**< List of replica nodes. */
-};
-
-/**
- * @struct DescribeTopics result
- */
-struct rd_kafka_TopicDescription_s {
-        char *topic;              /**< Topic name */
-        rd_kafka_Uuid_t topic_id; /**< Topic Id */
-        int partition_cnt;        /**< Number of partitions in \p partitions*/
-        rd_bool_t is_internal;    /**< Is the topic is internal to Kafka? */
-        rd_kafka_TopicPartitionInfo_t **partitions; /**< Partitions */
-        rd_kafka_error_t *error; /**< Topic error reported by broker */
-        int authorized_operations_cnt; /**< Count of operations allowed for
-                                        * topic. -1 indicates operations not
-                                        * requested. */
-        rd_kafka_AclOperation_t
-            *authorized_operations; /**< Operations allowed for topic. May be
-                                     *   NULL if operations were not requested */
-};
-
-/**@}*/
-
-/**
- * @name DescribeCluster
- * @{
- */
-/**
- * @struct DescribeCluster result - internal type.
- */
-typedef struct rd_kafka_ClusterDescription_s {
-        char *cluster_id;            /**< Cluster id */
-        rd_kafka_Node_t *controller; /**< Current controller. */
-        size_t node_cnt;             /**< Count of brokers in the cluster. */
-        rd_kafka_Node_t **nodes;     /**< Brokers in the cluster. */
-        int authorized_operations_cnt; /**< Count of operations allowed for
-                                        * cluster. -1 indicates operations not
-                                        * requested. */
-        rd_kafka_AclOperation_t
-            *authorized_operations; /**< Operations allowed for cluster. May be
-                                     *   NULL if operations were not requested */
-
-} rd_kafka_ClusterDescription_t;
-
-/**@}*/
-
-/**
- * @name ElectLeaders
- * @{
- */
-
-/**
- * @struct ElectLeaders request object
- */
-struct rd_kafka_ElectLeaders_s {
-        rd_kafka_ElectionType_t election_type; /*Election Type*/
-        rd_kafka_topic_partition_list_t
-            *partitions; /*TopicPartitions for election*/
-};
-
-/**
- * @struct ElectLeaders result object
- */
-typedef struct rd_kafka_ElectLeadersResult_s {
-        rd_list_t partitions; /**< Type (rd_kafka_topic_partition_result_t *) */
-} rd_kafka_ElectLeadersResult_t;
-
-/**@}*/
-
 #endif /* _RDKAFKA_ADMIN_H_ */
@@ -1,8 +1,7 @@
 /*
  * librdkafka - The Apache Kafka C/C++ library
  *
- * Copyright (c) 2020-2022, Magnus Edenhill
- *               2023 Confluent Inc.
+ * Copyright (c) 2020 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without

@@ -136,9 +135,7 @@ rd_kafka_assignment_apply_offsets(rd_kafka_t *rk,
         rd_kafka_topic_partition_t *rktpar;

         RD_KAFKA_TPLIST_FOREACH(rktpar, offsets) {
-                /* May be NULL, borrow ref. */
-                rd_kafka_toppar_t *rktp =
-                    rd_kafka_topic_partition_toppar(rk, rktpar);
+                rd_kafka_toppar_t *rktp = rktpar->_private; /* May be NULL */

                 if (!rd_kafka_topic_partition_list_del(
                         rk->rk_consumer.assignment.queried, rktpar->topic,

@@ -153,30 +150,8 @@ rd_kafka_assignment_apply_offsets(rd_kafka_t *rk,
                         continue;
                 }

-                if (err == RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH ||
-                    rktpar->err == RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH) {
-                        rd_kafka_topic_partition_t *rktpar_copy;
-
-                        rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
-                                     "Adding %s [%" PRId32
-                                     "] back to pending "
-                                     "list because of stale member epoch",
-                                     rktpar->topic, rktpar->partition);
-
-                        rktpar_copy = rd_kafka_topic_partition_list_add_copy(
-                            rk->rk_consumer.assignment.pending, rktpar);
-                        /* Need to reset offset to STORED to query for
-                         * the committed offset again. If the offset is
-                         * kept INVALID then auto.offset.reset will be
-                         * triggered.
-                         *
-                         * Not necessary if err is UNSTABLE_OFFSET_COMMIT
-                         * because the buffer is retried there. */
-                        rktpar_copy->offset = RD_KAFKA_OFFSET_STORED;
-
-                } else if (err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT ||
-                           rktpar->err ==
-                               RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) {
+                if (err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT ||
+                    rktpar->err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) {
                         /* Ongoing transactions are blocking offset retrieval.
                          * This is typically retried from the OffsetFetch
                          * handler but we can come here if the assignment

@@ -232,9 +207,7 @@ rd_kafka_assignment_apply_offsets(rd_kafka_t *rk,
                 /* Do nothing for request-level errors (err is set). */
         }

-        /* In case of stale member epoch we retry to serve the
-         * assignment only after a successful ConsumerGroupHeartbeat. */
-        if (offsets->cnt > 0 && err != RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH)
+        if (offsets->cnt > 0)
                 rd_kafka_assignment_serve(rk);
 }

@@ -298,32 +271,18 @@ static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk,
                 return;
         }


         if (err) {
-                switch (err) {
-                case RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH:
-                        rk->rk_cgrp->rkcg_consumer_flags |=
-                            RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING;
-                        rd_kafka_cgrp_consumer_expedite_next_heartbeat(
-                            rk->rk_cgrp,
-                            "OffsetFetch error: Stale member epoch");
-                        break;
-                case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
-                        rd_kafka_cgrp_consumer_expedite_next_heartbeat(
-                            rk->rk_cgrp, "OffsetFetch error: Unknown member");
-                        break;
-                default:
-                        rd_kafka_dbg(
-                            rk, CGRP, "OFFSET",
-                            "Offset fetch error for %d partition(s): %s",
-                            offsets->cnt, rd_kafka_err2str(err));
-                        rd_kafka_consumer_err(
-                            rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0,
-                            NULL, NULL, RD_KAFKA_OFFSET_INVALID,
-                            "Failed to fetch committed offsets for "
-                            "%d partition(s) in group \"%s\": %s",
-                            offsets->cnt, rk->rk_group_id->str,
-                            rd_kafka_err2str(err));
-                }
+                rd_kafka_dbg(rk, CGRP, "OFFSET",
+                             "Offset fetch error for %d partition(s): %s",
+                             offsets->cnt, rd_kafka_err2str(err));
+                rd_kafka_consumer_err(
+                    rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL,
+                    NULL, RD_KAFKA_OFFSET_INVALID,
+                    "Failed to fetch committed offsets for "
+                    "%d partition(s) in group \"%s\": %s",
+                    offsets->cnt, rk->rk_group_id->str, rd_kafka_err2str(err));
         }

         /* Apply the fetched offsets to the assignment */
@@ -343,9 +302,7 @@ static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) {
         int valid_offsets = 0;

         RD_KAFKA_TPLIST_FOREACH(rktpar, rk->rk_consumer.assignment.removed) {
-                rd_kafka_toppar_t *rktp =
-                    rd_kafka_topic_partition_ensure_toppar(
-                        rk, rktpar, rd_true); /* Borrow ref */
+                rd_kafka_toppar_t *rktp = rktpar->_private; /* Borrow ref */
                 int was_pending, was_queried;

                 /* Remove partition from pending and querying lists,

@@ -376,21 +333,17 @@ static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) {

                 rd_kafka_toppar_lock(rktp);

-                /* Save the currently stored offset and epoch on .removed
+                /* Save the currently stored offset on .removed
                  * so it will be committed below. */
-                rd_kafka_topic_partition_set_from_fetch_pos(
-                    rktpar, rktp->rktp_stored_pos);
-                rd_kafka_topic_partition_set_metadata_from_rktp_stored(rktpar,
-                                                                       rktp);
+                rktpar->offset = rktp->rktp_stored_offset;
                 valid_offsets += !RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset);

                 /* Reset the stored offset to invalid so that
                  * a manual offset-less commit() or the auto-committer
                  * will not commit a stored offset from a previous
                  * assignment (issue #2782). */
-                rd_kafka_offset_store0(
-                    rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1), NULL,
-                    0, rd_true, RD_DONT_LOCK);
+                rd_kafka_offset_store0(rktp, RD_KAFKA_OFFSET_INVALID, rd_true,
+                                       RD_DONT_LOCK);

                 /* Partition is no longer desired */
                 rd_kafka_toppar_desired_del(rktp);

@@ -469,9 +422,7 @@ static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {
         for (i = rk->rk_consumer.assignment.pending->cnt - 1; i >= 0; i--) {
                 rd_kafka_topic_partition_t *rktpar =
                     &rk->rk_consumer.assignment.pending->elems[i];
-                /* Borrow ref */
-                rd_kafka_toppar_t *rktp =
-                    rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
+                rd_kafka_toppar_t *rktp = rktpar->_private; /* Borrow ref */

                 rd_assert(!rktp->rktp_started);

@@ -492,11 +443,9 @@ static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {

                         rd_kafka_dbg(rk, CGRP, "SRVPEND",
                                      "Starting pending assigned partition "
-                                     "%s [%" PRId32 "] at %s",
+                                     "%s [%" PRId32 "] at offset %s",
                                      rktpar->topic, rktpar->partition,
-                                     rd_kafka_fetch_pos2str(
-                                         rd_kafka_topic_partition_get_fetch_pos(
-                                             rktpar)));
+                                     rd_kafka_offset2str(rktpar->offset));

                         /* Reset the (lib) pause flag which may have been set by
                          * the cgrp when scheduling the rebalance callback. */

@@ -508,10 +457,9 @@ static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {
                         rktp->rktp_started = rd_true;
                         rk->rk_consumer.assignment.started_cnt++;

-                        rd_kafka_toppar_op_fetch_start(
-                            rktp,
-                            rd_kafka_topic_partition_get_fetch_pos(rktpar),
-                            rk->rk_consumer.q, RD_KAFKA_NO_REPLYQ);
+                        rd_kafka_toppar_op_fetch_start(rktp, rktpar->offset,
+                                                       rk->rk_consumer.q,
+                                                       RD_KAFKA_NO_REPLYQ);


                 } else if (can_query_offsets) {

@@ -581,8 +529,7 @@ static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {
                             partitions_to_query->cnt);

                 rd_kafka_OffsetFetchRequest(
-                    coord, rk->rk_group_id->str, partitions_to_query, rd_false,
-                    -1, NULL,
+                    coord, rk->rk_group_id->str, partitions_to_query,
                     rk->rk_conf.isolation_level ==
                         RD_KAFKA_READ_COMMITTED /*require_stable_offsets*/,
                     0, /* Timeout */

@@ -786,9 +733,8 @@ rd_kafka_assignment_add(rd_kafka_t *rk,

                 /* Reset the stored offset to INVALID to avoid the race
                  * condition described in rdkafka_offset.h */
-                rd_kafka_offset_store0(
-                    rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1), NULL,
-                    0, rd_true /* force */, RD_DONT_LOCK);
+                rd_kafka_offset_store0(rktp, RD_KAFKA_OFFSET_INVALID,
+                                       rd_true /* force */, RD_DONT_LOCK);

                 rd_kafka_toppar_unlock(rktp);
         }
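The rd_kafka_offset_store0() calls restored above take a plain offset again instead of a fetch position (offset plus leader epoch). For reference, a hedged sketch of the public counterpart an application calls when enable.auto.offset.store=false; the handle and message are assumed to come from an existing consumer loop:

#include <librdkafka/rdkafka.h>

/* Illustrative: with enable.auto.offset.store=false the application
 * stores the offset to commit only after a message is fully processed;
 * the auto-committer then commits whatever has been stored. */
static void store_after_processing(rd_kafka_t *rk,
                                   const rd_kafka_message_t *rkmessage) {
        rd_kafka_topic_partition_list_t *offsets =
            rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_t *rktpar =
            rd_kafka_topic_partition_list_add(
                offsets, rd_kafka_topic_name(rkmessage->rkt),
                rkmessage->partition);

        /* Store the offset of the *next* message to consume. */
        rktpar->offset = rkmessage->offset + 1;

        rd_kafka_offsets_store(rk, offsets);
        rd_kafka_topic_partition_list_destroy(offsets);
}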
@@ -1,7 +1,7 @@
 /*
  * librdkafka - The Apache Kafka C/C++ library
  *
- * Copyright (c) 2020-2022, Magnus Edenhill
+ * Copyright (c) 2020 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without

@@ -1,8 +1,7 @@
 /*
  * librdkafka - The Apache Kafka C/C++ library
  *
- * Copyright (c) 2015-2022, Magnus Edenhill
- *               2023 Confluent Inc.
+ * Copyright (c) 2015 Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -60,9 +59,6 @@ void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm) {
         if (rkgm->rkgm_member_metadata)
                 rd_kafkap_bytes_destroy(rkgm->rkgm_member_metadata);

-        if (rkgm->rkgm_rack_id)
-                rd_kafkap_str_destroy(rkgm->rkgm_rack_id);
-
         memset(rkgm, 0, sizeof(*rkgm));
 }

@@ -110,9 +106,7 @@ rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
     const rd_list_t *topics,
     const void *userdata,
     size_t userdata_size,
-    const rd_kafka_topic_partition_list_t *owned_partitions,
-    int generation,
-    const rd_kafkap_str_t *rack_id) {
+    const rd_kafka_topic_partition_list_t *owned_partitions) {

         rd_kafka_buf_t *rkbuf;
         rd_kafkap_bytes_t *kbytes;

@@ -130,14 +124,12 @@ rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
          *   OwnedPartitions => [Topic Partitions]   // added in v1
          *     Topic => string
          *     Partitions => [int32]
-         *   GenerationId => int32                   // added in v2
-         *   RackId => string                        // added in v3
          */

         rkbuf = rd_kafka_buf_new(1, 100 + (topic_cnt * 100) + userdata_size);

         /* Version */
-        rd_kafka_buf_write_i16(rkbuf, 3);
+        rd_kafka_buf_write_i16(rkbuf, 1);
         rd_kafka_buf_write_i32(rkbuf, topic_cnt);
         RD_LIST_FOREACH(tinfo, topics, i)
         rd_kafka_buf_write_str(rkbuf, tinfo->topic, -1);

@@ -152,22 +144,13 @@ rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
                 /* If there are no owned partitions, this is specified as an
                  * empty array, not NULL. */
                 rd_kafka_buf_write_i32(rkbuf, 0); /* Topic count */
-        else {
-                const rd_kafka_topic_partition_field_t fields[] = {
-                    RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
-                    RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+        else
                 rd_kafka_buf_write_topic_partitions(
                     rkbuf, owned_partitions,
                     rd_false /*don't skip invalid offsets*/,
-                    rd_false /*any offset*/, rd_false /*don't use topic id*/,
-                    rd_true /*use topic name*/, fields);
-        }
-
-        /* Following data is ignored by consumer version < 2 */
-        rd_kafka_buf_write_i32(rkbuf, generation);
-
-        /* Following data is ignored by consumer version < 3 */
-        rd_kafka_buf_write_kstr(rkbuf, rack_id);
+                    rd_false /*any offset*/, rd_false /*don't write offsets*/,
+                    rd_false /*don't write epoch*/,
+                    rd_false /*don't write metadata*/);

         /* Get binary buffer and allocate a new Kafka Bytes with a copy. */
         rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
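The hunk above pins the serialized ConsumerGroupMemberMetadata back to version 1: a big-endian int16 version, an int32 topic count and the topic names, the userdata bytes, and the owned partitions, with the v2 GenerationId and v3 RackId fields dropped. A standalone sketch of the fixed-width framing only; the helpers are illustrative stand-ins, not librdkafka's rd_kafka_buf API:

#include <stddef.h>
#include <stdint.h>

/* Append big-endian integers the way the Kafka protocol (and the
 * rd_kafka_buf_write_i16()/i32() calls above) lay them out. */
static size_t put_i16(unsigned char *p, int16_t v) {
        p[0] = (unsigned char)((uint16_t)v >> 8);
        p[1] = (unsigned char)(v & 0xff);
        return 2;
}

static size_t put_i32(unsigned char *p, int32_t v) {
        p[0] = (unsigned char)((uint32_t)v >> 24);
        p[1] = (unsigned char)((v >> 16) & 0xff);
        p[2] = (unsigned char)((v >> 8) & 0xff);
        p[3] = (unsigned char)(v & 0xff);
        return 4;
}

/* Frame the start of a v1 member metadata blob: Version, then the
 * subscription's topic count. Length-prefixed topic strings, the
 * userdata bytes and the OwnedPartitions array would follow. */
static size_t frame_member_metadata_header(unsigned char *buf,
                                           int32_t topic_cnt) {
        size_t len = 0;
        len += put_i16(buf + len, 1);         /* Version = 1 */
        len += put_i32(buf + len, topic_cnt); /* [Topic] count */
        return len;
}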
@@ -185,13 +168,9 @@ rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata(
     const rd_kafka_assignor_t *rkas,
     void *assignor_state,
     const rd_list_t *topics,
-    const rd_kafka_topic_partition_list_t *owned_partitions,
-    const rd_kafkap_str_t *rack_id) {
-        /* Generation was earlier populated inside userData, and older versions
-         * of clients still expect that. So, in case the userData is empty, we
-         * set the explicit generation field to the default value, -1 */
-        return rd_kafka_consumer_protocol_member_metadata_new(
-            topics, NULL, 0, owned_partitions, -1 /* generation */, rack_id);
+    const rd_kafka_topic_partition_list_t *owned_partitions) {
+        return rd_kafka_consumer_protocol_member_metadata_new(topics, NULL, 0,
+                                                              owned_partitions);
 }


@@ -263,8 +242,6 @@ rd_kafka_member_subscriptions_map(rd_kafka_cgrp_t *rkcg,
                                   int member_cnt) {
         int ti;
         rd_kafka_assignor_topic_t *eligible_topic = NULL;
-        rd_kafka_metadata_internal_t *mdi =
-            rd_kafka_metadata_get_internal(metadata);

         rd_list_init(eligible_topics, RD_MIN(metadata->topic_cnt, 10),
                      (void *)rd_kafka_assignor_topic_destroy);

@@ -306,8 +283,7 @@ rd_kafka_member_subscriptions_map(rd_kafka_cgrp_t *rkcg,
                         continue;
                 }

-                eligible_topic->metadata          = &metadata->topics[ti];
-                eligible_topic->metadata_internal = &mdi->topics[ti];
+                eligible_topic->metadata = &metadata->topics[ti];
                 rd_list_add(eligible_topics, eligible_topic);
                 eligible_topic = NULL;
         }

@@ -507,8 +483,7 @@ rd_kafka_resp_err_t rd_kafka_assignor_add(
         const struct rd_kafka_assignor_s *rkas,
         void *assignor_state,
         const rd_list_t *topics,
-        const rd_kafka_topic_partition_list_t *owned_partitions,
-        const rd_kafkap_str_t *rack_id),
+        const rd_kafka_topic_partition_list_t *owned_partitions),
     void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas,
                              void **assignor_state,
                              const rd_kafka_topic_partition_list_t *assignment,
@@ -659,676 +634,6 @@ void rd_kafka_assignors_term(rd_kafka_t *rk) {
         rd_list_destroy(&rk->rk_conf.partition_assignors);
 }

-/**
- * @brief Computes whether rack-aware assignment needs to be used, or not.
- */
-rd_bool_t
-rd_kafka_use_rack_aware_assignment(rd_kafka_assignor_topic_t **topics,
-                                   size_t topic_cnt,
-                                   const rd_kafka_metadata_internal_t *mdi) {
-        /* Computing needs_rack_aware_assignment requires the evaluation of
-           three criteria:
-
-           1. At least one of the member has a non-null rack.
-           2. At least one common rack exists between members and partitions.
-           3. There is a partition which doesn't have replicas on all possible
-              racks, or in other words, all partitions don't have replicas on all
-              racks. Note that 'all racks' here means racks across all replicas of
-              all partitions, not including consumer racks. Also note that 'all
-              racks' are computed per-topic for range assignor, and across topics
-              for sticky assignor.
-        */
-
-        int i;
-        size_t t;
-        rd_kafka_group_member_t *member;
-        rd_list_t *all_consumer_racks  = NULL; /* Contained Type: char* */
-        rd_list_t *all_partition_racks = NULL; /* Contained Type: char* */
-        char *rack_id                  = NULL;
-        rd_bool_t needs_rack_aware_assignment = rd_true; /* assume true */
-
-        /* Criteria 1 */
-        /* We don't copy racks, so the free function is NULL. */
-        all_consumer_racks = rd_list_new(0, NULL);
-
-        for (t = 0; t < topic_cnt; t++) {
-                RD_LIST_FOREACH(member, &topics[t]->members, i) {
-                        if (member->rkgm_rack_id &&
-                            RD_KAFKAP_STR_LEN(member->rkgm_rack_id)) {
-                                /* Repetitions are fine, we will dedup it later.
-                                 */
-                                rd_list_add(
-                                    all_consumer_racks,
-                                    /* The const qualifier has to be discarded
-                                       because of how rd_list_t and
-                                       rd_kafkap_str_t are, but we never modify
-                                       items in all_consumer_racks. */
-                                    (char *)member->rkgm_rack_id->str);
-                        }
-                }
-        }
-        if (rd_list_cnt(all_consumer_racks) == 0) {
-                needs_rack_aware_assignment = rd_false;
-                goto done;
-        }
-
-
-        /* Critera 2 */
-        /* We don't copy racks, so the free function is NULL. */
-        all_partition_racks = rd_list_new(0, NULL);
-
-        for (t = 0; t < topic_cnt; t++) {
-                const int partition_cnt = topics[t]->metadata->partition_cnt;
-                for (i = 0; i < partition_cnt; i++) {
-                        size_t j;
-                        for (j = 0; j < topics[t]
-                                        ->metadata_internal->partitions[i]
-                                        .racks_cnt;
-                             j++) {
-                                char *rack =
-                                    topics[t]
-                                        ->metadata_internal->partitions[i]
-                                        .racks[j];
-                                rd_list_add(all_partition_racks, rack);
-                        }
-                }
-        }
-
-        /* If there are no partition racks, Criteria 2 cannot possibly be met.
-         */
-        if (rd_list_cnt(all_partition_racks) == 0) {
-                needs_rack_aware_assignment = rd_false;
-                goto done;
-        }
-
-        /* Sort and dedup the racks. */
-        rd_list_deduplicate(&all_consumer_racks, rd_strcmp2);
-        rd_list_deduplicate(&all_partition_racks, rd_strcmp2);
-
-
-        /* Iterate through each list in order, and see if there's anything in
-         * common */
-        RD_LIST_FOREACH(rack_id, all_consumer_racks, i) {
-                /* Break if there's even a single match. */
-                if (rd_list_find(all_partition_racks, rack_id, rd_strcmp2)) {
-                        break;
-                }
-        }
-        if (i == rd_list_cnt(all_consumer_racks)) {
-                needs_rack_aware_assignment = rd_false;
-                goto done;
-        }
-
-        /* Criteria 3 */
-        for (t = 0; t < topic_cnt; t++) {
-                const int partition_cnt = topics[t]->metadata->partition_cnt;
-                for (i = 0; i < partition_cnt; i++) {
-                        /* Since partition_racks[i] is a subset of
-                         * all_partition_racks, and both of them are deduped,
-                         * the same size indicates that they're equal. */
-                        if ((size_t)(rd_list_cnt(all_partition_racks)) !=
-                            topics[t]
-                                ->metadata_internal->partitions[i]
-                                .racks_cnt) {
-                                break;
-                        }
-                }
-                if (i < partition_cnt) {
-                        /* Break outer loop if inner loop was broken. */
-                        break;
-                }
-        }
-
-        /* Implies that all partitions have replicas on all racks. */
-        if (t == topic_cnt)
-                needs_rack_aware_assignment = rd_false;
-
-done:
-        RD_IF_FREE(all_consumer_racks, rd_list_destroy);
-        RD_IF_FREE(all_partition_racks, rd_list_destroy);
-
-        return needs_rack_aware_assignment;
-}
-
-
-/* Helper to populate the racks for brokers in the metadata for unit tests.
- * Passing num_broker_racks = 0 will return NULL racks. */
-void ut_populate_internal_broker_metadata(rd_kafka_metadata_internal_t *mdi,
-                                          int num_broker_racks,
-                                          rd_kafkap_str_t *all_racks[],
-                                          size_t all_racks_cnt) {
-        int i;
-
-        rd_assert(num_broker_racks < (int)all_racks_cnt);
-
-        for (i = 0; i < mdi->metadata.broker_cnt; i++) {
-                mdi->brokers[i].id = i;
-                /* Cast from const to non-const. We don't intend to modify it,
-                 * but unfortunately neither implementation of rd_kafkap_str_t
-                 * or rd_kafka_metadata_broker_internal_t can be changed. So,
-                 * this cast is used - in unit tests only. */
-                mdi->brokers[i].rack_id =
-                    (char *)(num_broker_racks
-                                 ? all_racks[i % num_broker_racks]->str
-                                 : NULL);
-        }
-}
-
-/* Helper to populate the deduplicated racks inside each partition. It's assumed
- * that `mdi->brokers` is set, maybe using
- * `ut_populate_internal_broker_metadata`. */
-void ut_populate_internal_topic_metadata(rd_kafka_metadata_internal_t *mdi) {
-        int ti;
-        rd_kafka_metadata_broker_internal_t *brokers_internal;
-        size_t broker_cnt;
-
-        rd_assert(mdi->brokers);
-
-        brokers_internal = mdi->brokers;
-        broker_cnt       = mdi->metadata.broker_cnt;
-
-        for (ti = 0; ti < mdi->metadata.topic_cnt; ti++) {
-                int i;
-                rd_kafka_metadata_topic_t *mdt = &mdi->metadata.topics[ti];
-                rd_kafka_metadata_topic_internal_t *mdti = &mdi->topics[ti];
-
-                for (i = 0; i < mdt->partition_cnt; i++) {
-                        int j;
-                        rd_kafka_metadata_partition_t *partition =
-                            &mdt->partitions[i];
-                        rd_kafka_metadata_partition_internal_t
-                            *partition_internal = &mdti->partitions[i];
-
-                        rd_list_t *curr_list;
-                        char *rack;
-
-                        if (partition->replica_cnt == 0)
-                                continue;
-
-                        curr_list = rd_list_new(
-                            0, NULL); /* use a list for de-duplication */
-                        for (j = 0; j < partition->replica_cnt; j++) {
-                                rd_kafka_metadata_broker_internal_t key = {
-                                    .id = partition->replicas[j]};
-                                rd_kafka_metadata_broker_internal_t *broker =
-                                    bsearch(
-                                        &key, brokers_internal, broker_cnt,
-                                        sizeof(
-                                            rd_kafka_metadata_broker_internal_t),
-                                        rd_kafka_metadata_broker_internal_cmp);
-                                if (!broker || !broker->rack_id)
-                                        continue;
-                                rd_list_add(curr_list, broker->rack_id);
-                        }
-                        rd_list_deduplicate(&curr_list, rd_strcmp2);
-
-                        partition_internal->racks_cnt = rd_list_cnt(curr_list);
-                        partition_internal->racks     = rd_malloc(
-                            sizeof(char *) * partition_internal->racks_cnt);
-                        RD_LIST_FOREACH(rack, curr_list, j) {
-                                partition_internal->racks[j] =
-                                    rack; /* no duplication */
-                        }
-                        rd_list_destroy(curr_list);
-                }
-        }
-}
-
-/* Helper to destroy test metadata. Destroying the metadata has some additional
- * steps in case of tests. */
-void ut_destroy_metadata(rd_kafka_metadata_t *md) {
-        int ti;
-        rd_kafka_metadata_internal_t *mdi = rd_kafka_metadata_get_internal(md);
-
-        for (ti = 0; ti < md->topic_cnt; ti++) {
-                int i;
-                rd_kafka_metadata_topic_t *mdt           = &md->topics[ti];
-                rd_kafka_metadata_topic_internal_t *mdti = &mdi->topics[ti];
-
-                for (i = 0; mdti && i < mdt->partition_cnt; i++) {
-                        rd_free(mdti->partitions[i].racks);
-                }
-        }
-
-        rd_kafka_metadata_destroy(md);
-}
-
-
-/**
- * @brief Set a member's owned partitions based on its assignment.
- *
- * For use between assignor_run(). This is mimicing a consumer receiving
- * its new assignment and including it in the next rebalance as its
- * owned-partitions.
- */
-void ut_set_owned(rd_kafka_group_member_t *rkgm) {
-        if (rkgm->rkgm_owned)
-                rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned);
-
-        rkgm->rkgm_owned =
-            rd_kafka_topic_partition_list_copy(rkgm->rkgm_assignment);
-}
-
-
-void ut_print_toppar_list(const rd_kafka_topic_partition_list_t *partitions) {
-        int i;
-
-        for (i = 0; i < partitions->cnt; i++)
-                RD_UT_SAY(" %s [%" PRId32 "]", partitions->elems[i].topic,
-                          partitions->elems[i].partition);
-}
-
-
-/* Implementation for ut_init_member and ut_init_member_with_rackv. */
-static void ut_init_member_internal(rd_kafka_group_member_t *rkgm,
-                                    const char *member_id,
-                                    const rd_kafkap_str_t *rack_id,
-                                    va_list ap) {
-        const char *topic;
-
-        memset(rkgm, 0, sizeof(*rkgm));
-
-        rkgm->rkgm_member_id         = rd_kafkap_str_new(member_id, -1);
-        rkgm->rkgm_group_instance_id = rd_kafkap_str_new(member_id, -1);
-        rkgm->rkgm_rack_id = rack_id ? rd_kafkap_str_copy(rack_id) : NULL;
-
-        rd_list_init(&rkgm->rkgm_eligible, 0, NULL);
-
-        rkgm->rkgm_subscription = rd_kafka_topic_partition_list_new(4);
-
-        while ((topic = va_arg(ap, const char *)))
-                rd_kafka_topic_partition_list_add(rkgm->rkgm_subscription,
-                                                  topic, RD_KAFKA_PARTITION_UA);
-
-        rkgm->rkgm_assignment =
-            rd_kafka_topic_partition_list_new(rkgm->rkgm_subscription->size);
-
-        rkgm->rkgm_generation = 1;
-}
-
-/**
- * @brief Initialize group member struct for testing.
- *
- * va-args is a NULL-terminated list of (const char *) topics.
- *
- * Use rd_kafka_group_member_clear() to free fields.
- */
-void ut_init_member(rd_kafka_group_member_t *rkgm, const char *member_id, ...) {
-        va_list ap;
-        va_start(ap, member_id);
-        ut_init_member_internal(rkgm, member_id, NULL, ap);
-        va_end(ap);
-}
-
-/**
- * @brief Initialize group member struct for testing with a rackid.
- *
- * va-args is a NULL-terminated list of (const char *) topics.
- *
- * Use rd_kafka_group_member_clear() to free fields.
- */
-void ut_init_member_with_rackv(rd_kafka_group_member_t *rkgm,
-                               const char *member_id,
-                               const rd_kafkap_str_t *rack_id,
-                               ...) {
-        va_list ap;
-        va_start(ap, rack_id);
-        ut_init_member_internal(rkgm, member_id, rack_id, ap);
-        va_end(ap);
-}
-
-/**
- * @brief Initialize group member struct for testing with a rackid.
- *
- * Topics that the member is subscribed to are specified in an array with the
- * size specified separately.
- *
- * Use rd_kafka_group_member_clear() to free fields.
- */
-void ut_init_member_with_rack(rd_kafka_group_member_t *rkgm,
-                              const char *member_id,
-                              const rd_kafkap_str_t *rack_id,
-                              char *topics[],
-                              size_t topic_cnt) {
-        size_t i;
-
-        memset(rkgm, 0, sizeof(*rkgm));
-
-        rkgm->rkgm_member_id         = rd_kafkap_str_new(member_id, -1);
-        rkgm->rkgm_group_instance_id = rd_kafkap_str_new(member_id, -1);
-        rkgm->rkgm_rack_id = rack_id ? rd_kafkap_str_copy(rack_id) : NULL;
-        rd_list_init(&rkgm->rkgm_eligible, 0, NULL);
-
-        rkgm->rkgm_subscription = rd_kafka_topic_partition_list_new(4);
-
-        for (i = 0; i < topic_cnt; i++) {
-                rd_kafka_topic_partition_list_add(
-                    rkgm->rkgm_subscription, topics[i], RD_KAFKA_PARTITION_UA);
-        }
-        rkgm->rkgm_assignment =
-            rd_kafka_topic_partition_list_new(rkgm->rkgm_subscription->size);
-}
-
-/**
- * @brief Verify that member's assignment matches the expected partitions.
- *
- * The va-list is a NULL-terminated list of (const char *topic, int partition)
- * tuples.
- *
- * @returns 0 on success, else raises a unittest error and returns 1.
- */
-int verifyAssignment0(const char *function,
-                      int line,
-                      rd_kafka_group_member_t *rkgm,
-                      ...) {
-        va_list ap;
-        int cnt = 0;
-        const char *topic;
-        int fails = 0;
-
-        va_start(ap, rkgm);
-        while ((topic = va_arg(ap, const char *))) {
-                int partition = va_arg(ap, int);
-                cnt++;
-
-                if (!rd_kafka_topic_partition_list_find(rkgm->rkgm_assignment,
-                                                        topic, partition)) {
-                        RD_UT_WARN(
-                            "%s:%d: Expected %s [%d] not found in %s's "
-                            "assignment (%d partition(s))",
-                            function, line, topic, partition,
-                            rkgm->rkgm_member_id->str,
-                            rkgm->rkgm_assignment->cnt);
-                        fails++;
-                }
-        }
-        va_end(ap);
-
-        if (cnt != rkgm->rkgm_assignment->cnt) {
-                RD_UT_WARN(
-                    "%s:%d: "
-                    "Expected %d assigned partition(s) for %s, not %d",
-                    function, line, cnt, rkgm->rkgm_member_id->str,
-                    rkgm->rkgm_assignment->cnt);
-                fails++;
-        }
-
-        if (fails)
-                ut_print_toppar_list(rkgm->rkgm_assignment);
-
-        RD_UT_ASSERT(!fails, "%s:%d: See previous errors", function, line);
-
-        return 0;
-}
-
-/**
- * @brief Verify that all members' assignment matches the expected partitions.
- *
- * The va-list is a list of (const char *topic, int partition)
- * tuples, and NULL to demarcate different members' assignment.
- *
- * @returns 0 on success, else raises a unittest error and returns 1.
- */
-int verifyMultipleAssignment0(const char *function,
-                              int line,
-                              rd_kafka_group_member_t *rkgms,
-                              size_t member_cnt,
-                              ...) {
-        va_list ap;
-        const char *topic;
-        int fails = 0;
-        size_t i  = 0;
-
-        if (member_cnt == 0) {
-                return 0;
-        }
-
-        va_start(ap, member_cnt);
-        for (i = 0; i < member_cnt; i++) {
-                rd_kafka_group_member_t *rkgm = &rkgms[i];
-                int cnt         = 0;
-                int local_fails = 0;
-
-                while ((topic = va_arg(ap, const char *))) {
-                        int partition = va_arg(ap, int);
-                        cnt++;
-
-                        if (!rd_kafka_topic_partition_list_find(
-                                rkgm->rkgm_assignment, topic, partition)) {
-                                RD_UT_WARN(
-                                    "%s:%d: Expected %s [%d] not found in %s's "
-                                    "assignment (%d partition(s))",
-                                    function, line, topic, partition,
-                                    rkgm->rkgm_member_id->str,
-                                    rkgm->rkgm_assignment->cnt);
-                                local_fails++;
-                        }
-                }
-
-                if (cnt != rkgm->rkgm_assignment->cnt) {
-                        RD_UT_WARN(
-                            "%s:%d: "
-                            "Expected %d assigned partition(s) for %s, not %d",
-                            function, line, cnt, rkgm->rkgm_member_id->str,
-                            rkgm->rkgm_assignment->cnt);
-                        fails++;
-                }
-
-                if (local_fails)
-                        ut_print_toppar_list(rkgm->rkgm_assignment);
-                fails += local_fails;
-        }
-        va_end(ap);
-
-        RD_UT_ASSERT(!fails, "%s:%d: See previous errors", function, line);
-
-        return 0;
-}
-
-
-#define verifyNumPartitionsWithRackMismatchPartition(rktpar, metadata,         \
-                                                     increase)                 \
-        do {                                                                   \
-                if (!rktpar)                                                   \
-                        break;                                                 \
-                int i;                                                         \
-                rd_bool_t noneMatch = rd_true;                                 \
-                rd_kafka_metadata_internal_t *metadata_internal =              \
-                    rd_kafka_metadata_get_internal(metadata);                  \
-                                                                               \
-                for (i = 0; i < metadata->topics[j].partitions[k].replica_cnt; \
-                     i++) {                                                    \
-                        int32_t replica_id =                                   \
-                            metadata->topics[j].partitions[k].replicas[i];     \
-                        rd_kafka_metadata_broker_internal_t *broker;           \
-                        rd_kafka_metadata_broker_internal_find(                \
-                            metadata_internal, replica_id, broker);            \
-                                                                               \
-                        if (broker && !strcmp(rack_id, broker->rack_id)) {     \
-                                noneMatch = rd_false;                          \
-                                break;                                         \
-                        }                                                      \
-                }                                                              \
-                                                                               \
-                if (noneMatch)                                                 \
-                        increase++;                                            \
-        } while (0);
-
-/**
- * @brief Verify number of partitions with rack mismatch.
- */
-int verifyNumPartitionsWithRackMismatch0(const char *function,
-                                         int line,
-                                         rd_kafka_metadata_t *metadata,
-                                         rd_kafka_group_member_t *rkgms,
-                                         size_t member_cnt,
-                                         int expectedNumMismatch) {
-        size_t i;
-        int j, k;
-
-        int numMismatched = 0;
-        for (i = 0; i < member_cnt; i++) {
-                rd_kafka_group_member_t *rkgm = &rkgms[i];
-                const char *rack_id           = rkgm->rkgm_rack_id->str;
-                if (rack_id) {
-                        for (j = 0; j < metadata->topic_cnt; j++) {
-                                for (k = 0;
-                                     k < metadata->topics[j].partition_cnt;
-                                     k++) {
-                                        rd_kafka_topic_partition_t *rktpar =
-                                            rd_kafka_topic_partition_list_find(
-                                                rkgm->rkgm_assignment,
-                                                metadata->topics[j].topic, k);
-                                        verifyNumPartitionsWithRackMismatchPartition(
-                                            rktpar, metadata, numMismatched);
-                                }
-                        }
-                }
-        }
-
-        RD_UT_ASSERT(expectedNumMismatch == numMismatched,
-                     "%s:%d: Expected %d mismatches, got %d", function, line,
-                     expectedNumMismatch, numMismatched);
-
-        return 0;
-}
-
-
-int verifyValidityAndBalance0(const char *func,
-                              int line,
-                              rd_kafka_group_member_t *members,
-                              size_t member_cnt,
-                              const rd_kafka_metadata_t *metadata) {
-        int fails = 0;
-        int i;
-        rd_bool_t verbose = rd_false; /* Enable for troubleshooting */
-
-        RD_UT_SAY("%s:%d: verifying assignment for %d member(s):", func, line,
-                  (int)member_cnt);
-
-        for (i = 0; i < (int)member_cnt; i++) {
-                const char *consumer = members[i].rkgm_member_id->str;
-                const rd_kafka_topic_partition_list_t *partitions =
-                    members[i].rkgm_assignment;
-                int p, j;
-
-                if (verbose)
-                        RD_UT_SAY(
-                            "%s:%d: "
-                            "consumer \"%s\", %d subscribed topic(s), "
-                            "%d assigned partition(s):",
-                            func, line, consumer,
-                            members[i].rkgm_subscription->cnt, partitions->cnt);
-
-                for (p = 0; p < partitions->cnt; p++) {
-                        const rd_kafka_topic_partition_t *partition =
-                            &partitions->elems[p];
-
-                        if (verbose)
-                                RD_UT_SAY("%s:%d: %s [%" PRId32 "]", func,
-                                          line, partition->topic,
-                                          partition->partition);
-
-                        if (!rd_kafka_topic_partition_list_find(
-                                members[i].rkgm_subscription, partition->topic,
-                                RD_KAFKA_PARTITION_UA)) {
-                                RD_UT_WARN("%s [%" PRId32
-                                           "] is assigned to "
-                                           "%s but it is not subscribed to "
-                                           "that topic",
-                                           partition->topic,
-                                           partition->partition, consumer);
-                                fails++;
-                        }
-                }
-
-                /* Update the member's owned partitions to match
-                 * the assignment. */
-                ut_set_owned(&members[i]);
-
-                if (i == (int)member_cnt - 1)
-                        continue;
-
-                for (j = i + 1; j < (int)member_cnt; j++) {
-                        const char *otherConsumer =
-                            members[j].rkgm_member_id->str;
-                        const rd_kafka_topic_partition_list_t *otherPartitions =
-                            members[j].rkgm_assignment;
-                        rd_bool_t balanced =
-                            abs(partitions->cnt - otherPartitions->cnt) <= 1;
-
-                        for (p = 0; p < partitions->cnt; p++) {
-                                const rd_kafka_topic_partition_t *partition =
-                                    &partitions->elems[p];
-
-                                if (rd_kafka_topic_partition_list_find(
-                                        otherPartitions, partition->topic,
-                                        partition->partition)) {
-                                        RD_UT_WARN(
-                                            "Consumer %s and %s are both "
-                                            "assigned %s [%" PRId32 "]",
-                                            consumer, otherConsumer,
-                                            partition->topic,
-                                            partition->partition);
-                                        fails++;
-                                }
-
-
-                                /* If assignment is imbalanced and this topic
-                                 * is also subscribed by the other consumer
-                                 * it means the assignment strategy failed to
-                                 * properly balance the partitions. */
-                                if (!balanced &&
-                                    rd_kafka_topic_partition_list_find_topic_by_name(
-                                        otherPartitions, partition->topic)) {
-                                        RD_UT_WARN(
-                                            "Some %s partition(s) can be "
-                                            "moved from "
-                                            "%s (%d partition(s)) to "
-                                            "%s (%d partition(s)) to "
-                                            "achieve a better balance",
-                                            partition->topic, consumer,
-                                            partitions->cnt, otherConsumer,
-                                            otherPartitions->cnt);
-                                        fails++;
-                                }
-                        }
-                }
-        }
-
-        RD_UT_ASSERT(!fails, "%s:%d: See %d previous errors", func, line,
-                     fails);
-
-        return 0;
-}
-
-/**
- * @brief Checks that all assigned partitions are fully balanced.
- *
- * Only works for symmetrical subscriptions.
- */
-int isFullyBalanced0(const char *function,
-                     int line,
-                     const rd_kafka_group_member_t *members,
-                     size_t member_cnt) {
-        int min_assignment = INT_MAX;
-        int max_assignment = -1;
-        size_t i;
-
-        for (i = 0; i < member_cnt; i++) {
-                int size = members[i].rkgm_assignment->cnt;
-                if (size < min_assignment)
-                        min_assignment = size;
-                if (size > max_assignment)
-                        max_assignment = size;
-        }
-
-        RD_UT_ASSERT(max_assignment - min_assignment <= 1,
-                     "%s:%d: Assignment not balanced: min %d, max %d", function,
-                     line, min_assignment, max_assignment);
-
-        return 0;
-}
-
-
 /**
@@ -1574,7 +879,6 @@ static int ut_assignors(void) {
         /* Run through test cases */
         for (i = 0; tests[i].name; i++) {
                 int ie, it, im;
-                rd_kafka_metadata_internal_t metadata_internal;
                 rd_kafka_metadata_t metadata;
                 rd_kafka_group_member_t *members;

@@ -1582,38 +886,14 @@ static int ut_assignors(void) {
                 metadata.topic_cnt = tests[i].topic_cnt;
                 metadata.topics =
                     rd_alloca(sizeof(*metadata.topics) * metadata.topic_cnt);
-                metadata_internal.topics = rd_alloca(
-                    sizeof(*metadata_internal.topics) * metadata.topic_cnt);
-
-                memset(metadata.topics, 0,
-                       sizeof(*metadata.topics) * metadata.topic_cnt);
-                memset(metadata_internal.topics, 0,
-                       sizeof(*metadata_internal.topics) * metadata.topic_cnt);
-
                 for (it = 0; it < metadata.topic_cnt; it++) {
-                        int pt;
                         metadata.topics[it].topic =
                             (char *)tests[i].topics[it].name;
                         metadata.topics[it].partition_cnt =
                             tests[i].topics[it].partition_cnt;
-                        metadata.topics[it].partitions =
-                            rd_alloca(metadata.topics[it].partition_cnt *
-                                      sizeof(rd_kafka_metadata_partition_t));
-                        metadata_internal.topics[it].partitions = rd_alloca(
-                            metadata.topics[it].partition_cnt *
-                            sizeof(rd_kafka_metadata_partition_internal_t));
-                        for (pt = 0; pt < metadata.topics[it].partition_cnt;
-                             pt++) {
-                                metadata.topics[it].partitions[pt].id = pt;
-                                metadata.topics[it].partitions[pt].replica_cnt =
-                                    0;
-                                metadata_internal.topics[it]
-                                    .partitions[pt]
-                                    .racks_cnt = 0;
-                                metadata_internal.topics[it]
-                                    .partitions[pt]
-                                    .racks = NULL;
-                        }
+                        metadata.topics[it].partitions = NULL; /* Not used */
                 }

                 /* Create members */

@@ -1664,12 +944,9 @@ static int ut_assignors(void) {
                 }

                 /* Run assignor */
-                metadata_internal.metadata = metadata;
-                err = rd_kafka_assignor_run(
-                    rk->rk_cgrp, rkas,
-                    (rd_kafka_metadata_t *)(&metadata_internal),
-                    members, tests[i].member_cnt, errstr,
-                    sizeof(errstr));
+                err = rd_kafka_assignor_run(
+                    rk->rk_cgrp, rkas, &metadata, members,
+                    tests[i].member_cnt, errstr, sizeof(errstr));

                 RD_UT_ASSERT(!err, "Assignor case %s for %s failed: %s",
                              tests[i].name,
@@ -1,8 +1,7 @@
/*
 * librdkafka - The Apache Kafka C/C++ library
 *
 * Copyright (c) 2015-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * Copyright (c) 2015 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -29,7 +28,7 @@
#ifndef _RDKAFKA_ASSIGNOR_H_
#define _RDKAFKA_ASSIGNOR_H_

#include "rdkafka_metadata.h"


/*!
 * Enumerates the different rebalance protocol types.

@@ -70,8 +69,6 @@ typedef struct rd_kafka_group_member_s {
        rd_kafkap_bytes_t *rkgm_member_metadata;
        /** Group generation id. */
        int rkgm_generation;
        /** Member rack id. */
        rd_kafkap_str_t *rkgm_rack_id;
} rd_kafka_group_member_t;


@@ -81,13 +78,13 @@ int rd_kafka_group_member_find_subscription(rd_kafka_t *rk,
                                            const rd_kafka_group_member_t *rkgm,
                                            const char *topic);


/**
 * Structure to hold metadata for a single topic and all its
 * subscribing members.
 */
typedef struct rd_kafka_assignor_topic_s {
        const rd_kafka_metadata_topic_t *metadata;
        const rd_kafka_metadata_topic_internal_t *metadata_internal;
        rd_list_t members; /* rd_kafka_group_member_t * */
} rd_kafka_assignor_topic_t;

@@ -123,8 +120,7 @@ typedef struct rd_kafka_assignor_s {
            const struct rd_kafka_assignor_s *rkas,
            void *assignor_state,
            const rd_list_t *topics,
            const rd_kafka_topic_partition_list_t *owned_partitions,
            const rd_kafkap_str_t *rack_id);
            const rd_kafka_topic_partition_list_t *owned_partitions);

        void (*rkas_on_assignment_cb)(
            const struct rd_kafka_assignor_s *rkas,

@@ -162,8 +158,7 @@ rd_kafka_resp_err_t rd_kafka_assignor_add(
        const struct rd_kafka_assignor_s *rkas,
        void *assignor_state,
        const rd_list_t *topics,
        const rd_kafka_topic_partition_list_t *owned_partitions,
        const rd_kafkap_str_t *rack_id),
        const rd_kafka_topic_partition_list_t *owned_partitions),
    void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas,
                             void **assignor_state,
                             const rd_kafka_topic_partition_list_t *assignment,

@@ -177,16 +172,13 @@ rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
    const rd_list_t *topics,
    const void *userdata,
    size_t userdata_size,
    const rd_kafka_topic_partition_list_t *owned_partitions,
    int generation,
    const rd_kafkap_str_t *rack_id);
    const rd_kafka_topic_partition_list_t *owned_partitions);

rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata(
    const rd_kafka_assignor_t *rkas,
    void *assignor_state,
    const rd_list_t *topics,
    const rd_kafka_topic_partition_list_t *owned_partitions,
    const rd_kafkap_str_t *rack_id);
    const rd_kafka_topic_partition_list_t *owned_partitions);


void rd_kafka_assignor_update_subscription(

@@ -216,187 +208,5 @@ void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm);
rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk);
rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk);
rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk);
rd_bool_t
rd_kafka_use_rack_aware_assignment(rd_kafka_assignor_topic_t **topics,
                                   size_t topic_cnt,
                                   const rd_kafka_metadata_internal_t *mdi);

/**
 * @name Common unit test functions, macros, and enums to use across assignors.
 *
 *
 *
 */

/* Tests can be parametrized to contain either only broker racks, only consumer
 * racks or both.*/
typedef enum {
        RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK = 0,
        RD_KAFKA_RANGE_ASSIGNOR_UT_NO_CONSUMER_RACK = 1,
        RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK = 2,
        RD_KAFKA_RANGE_ASSIGNOR_UT_CONFIG_CNT = 3,
} rd_kafka_assignor_ut_rack_config_t;


void ut_populate_internal_broker_metadata(rd_kafka_metadata_internal_t *mdi,
                                          int num_broker_racks,
                                          rd_kafkap_str_t *all_racks[],
                                          size_t all_racks_cnt);

void ut_populate_internal_topic_metadata(rd_kafka_metadata_internal_t *mdi);

void ut_destroy_metadata(rd_kafka_metadata_t *md);

void ut_set_owned(rd_kafka_group_member_t *rkgm);

void ut_print_toppar_list(const rd_kafka_topic_partition_list_t *partitions);

void ut_init_member(rd_kafka_group_member_t *rkgm, const char *member_id, ...);

void ut_init_member_with_rackv(rd_kafka_group_member_t *rkgm,
                               const char *member_id,
                               const rd_kafkap_str_t *rack_id,
                               ...);

void ut_init_member_with_rack(rd_kafka_group_member_t *rkgm,
                              const char *member_id,
                              const rd_kafkap_str_t *rack_id,
                              char *topics[],
                              size_t topic_cnt);

int verifyAssignment0(const char *function,
                      int line,
                      rd_kafka_group_member_t *rkgm,
                      ...);

int verifyMultipleAssignment0(const char *function,
                              int line,
                              rd_kafka_group_member_t *rkgms,
                              size_t member_cnt,
                              ...);

int verifyNumPartitionsWithRackMismatch0(const char *function,
                                         int line,
                                         rd_kafka_metadata_t *metadata,
                                         rd_kafka_group_member_t *rkgms,
                                         size_t member_cnt,
                                         int expectedNumMismatch);

#define verifyAssignment(rkgm, ...)                                            \
        do {                                                                   \
                if (verifyAssignment0(__FUNCTION__, __LINE__, rkgm,            \
                                      __VA_ARGS__))                            \
                        return 1;                                              \
        } while (0)

#define verifyMultipleAssignment(rkgms, member_cnt, ...)                       \
        do {                                                                   \
                if (verifyMultipleAssignment0(__FUNCTION__, __LINE__, rkgms,   \
                                              member_cnt, __VA_ARGS__))        \
                        return 1;                                              \
        } while (0)

#define verifyNumPartitionsWithRackMismatch(metadata, rkgms, member_cnt,       \
                                            expectedNumMismatch)               \
        do {                                                                   \
                if (verifyNumPartitionsWithRackMismatch0(                      \
                        __FUNCTION__, __LINE__, metadata, rkgms, member_cnt,   \
                        expectedNumMismatch))                                  \
                        return 1;                                              \
        } while (0)

int verifyValidityAndBalance0(const char *func,
                              int line,
                              rd_kafka_group_member_t *members,
                              size_t member_cnt,
                              const rd_kafka_metadata_t *metadata);

#define verifyValidityAndBalance(members, member_cnt, metadata)                \
        do {                                                                   \
                if (verifyValidityAndBalance0(__FUNCTION__, __LINE__, members, \
                                              member_cnt, metadata))           \
                        return 1;                                              \
        } while (0)

int isFullyBalanced0(const char *function,
                     int line,
                     const rd_kafka_group_member_t *members,
                     size_t member_cnt);

#define isFullyBalanced(members, member_cnt)                                   \
        do {                                                                   \
                if (isFullyBalanced0(__FUNCTION__, __LINE__, members,          \
                                     member_cnt))                              \
                        return 1;                                              \
        } while (0)
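
All of these wrappers share one design: the macro captures the call site via __FUNCTION__ and __LINE__ so the corresponding *0 helper can report where the failing check lives, and the do { ... return 1; } while (0) shape lets a plain statement abort the enclosing int-returning test function. A reduced sketch of the idiom (hypothetical check0 helper, not librdkafka code):

#include <stdio.h>

/* Hypothetical helper: reports the caller's location on failure. */
static int check0(const char *function, int line, int ok) {
        if (!ok)
                fprintf(stderr, "%s:%d: check failed\n", function, line);
        return !ok;
}

/* The wrapper turns the helper into a statement that aborts the
 * enclosing int-returning test function with 1 on failure. */
#define check(ok)                                                              \
        do {                                                                   \
                if (check0(__FUNCTION__, __LINE__, ok))                        \
                        return 1;                                              \
        } while (0)

static int some_test(void) {
        check(1 + 1 == 2); /* passes */
        check(2 + 2 == 4); /* passes */
        return 0;
}

int main(void) {
        return some_test();
}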

/* Helper macro to initialize a consumer with or without a rack depending on the
 * value of parametrization. */
#define ut_initMemberConditionalRack(member_ptr, member_id, rack,              \
                                     parametrization, ...)                     \
        do {                                                                   \
                if (parametrization ==                                         \
                    RD_KAFKA_RANGE_ASSIGNOR_UT_NO_CONSUMER_RACK) {             \
                        ut_init_member(member_ptr, member_id, __VA_ARGS__);    \
                } else {                                                       \
                        ut_init_member_with_rackv(member_ptr, member_id, rack, \
                                                  __VA_ARGS__);                \
                }                                                              \
        } while (0)

/* Helper macro to initialize rd_kafka_metadata_t* with or without replicas
 * depending on the value of parametrization. This accepts variadic arguments
 * for topics. */
#define ut_initMetadataConditionalRack(metadataPtr, replication_factor,        \
                                       num_broker_racks, all_racks,            \
                                       all_racks_cnt, parametrization, ...)    \
        do {                                                                   \
                int num_brokers = num_broker_racks > 0                         \
                                      ? replication_factor * num_broker_racks  \
                                      : replication_factor;                    \
                if (parametrization ==                                         \
                    RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) {               \
                        *(metadataPtr) =                                       \
                            rd_kafka_metadata_new_topic_mockv(__VA_ARGS__);    \
                } else {                                                       \
                        *(metadataPtr) =                                       \
                            rd_kafka_metadata_new_topic_with_partition_replicas_mockv( \
                                replication_factor, num_brokers, __VA_ARGS__); \
                        ut_populate_internal_broker_metadata(                  \
                            rd_kafka_metadata_get_internal(*(metadataPtr)),    \
                            num_broker_racks, all_racks, all_racks_cnt);       \
                        ut_populate_internal_topic_metadata(                   \
                            rd_kafka_metadata_get_internal(*(metadataPtr)));   \
                }                                                              \
        } while (0)


/* Helper macro to initialize rd_kafka_metadata_t* with or without replicas
 * depending on the value of parametrization. This accepts a list of topics,
 * rather than being variadic.
 */
#define ut_initMetadataConditionalRack0(                                       \
    metadataPtr, replication_factor, num_broker_racks, all_racks,              \
    all_racks_cnt, parametrization, topics, topic_cnt)                         \
        do {                                                                   \
                int num_brokers = num_broker_racks > 0                         \
                                      ? replication_factor * num_broker_racks  \
                                      : replication_factor;                    \
                if (parametrization ==                                         \
                    RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) {               \
                        *(metadataPtr) = rd_kafka_metadata_new_topic_mock(     \
                            topics, topic_cnt, -1, 0);                         \
                } else {                                                       \
                        *(metadataPtr) = rd_kafka_metadata_new_topic_mock(     \
                            topics, topic_cnt, replication_factor,             \
                            num_brokers);                                      \
                        ut_populate_internal_broker_metadata(                  \
                            rd_kafka_metadata_get_internal(*(metadataPtr)),    \
                            num_broker_racks, all_racks, all_racks_cnt);       \
                        ut_populate_internal_topic_metadata(                   \
                            rd_kafka_metadata_get_internal(*(metadataPtr)));   \
                }                                                              \
        } while (0)


#endif /* _RDKAFKA_ASSIGNOR_H_ */

@@ -1,8 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * Copyright (c) 2018 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -235,60 +234,19 @@ void rd_kafka_acl_result_free(void *ptr) {
 * @return A new allocated Node object.
 *         Use rd_kafka_Node_destroy() to free when done.
 */
rd_kafka_Node_t *rd_kafka_Node_new(int32_t id,
rd_kafka_Node_t *rd_kafka_Node_new(int id,
                                   const char *host,
                                   uint16_t port,
                                   const char *rack) {
                                   const char *rack_id) {
        rd_kafka_Node_t *ret = rd_calloc(1, sizeof(*ret));
        ret->id = id;
        ret->port = port;
        ret->host = rd_strdup(host);
        if (rack != NULL)
                ret->rack = rd_strdup(rack);
        if (rack_id != NULL)
                ret->rack_id = rd_strdup(rack_id);
        return ret;
}

/**
 * @brief Create a new Node object given a node id, and use broker information
 *        to populate other fields.
 *
 * @return A new allocated Node object.
 *         Use rd_kafka_Node_destroy() to free when done.
 * @remark The \p brokers_sorted and \p brokers_internal arrays are assumed to
 *         be sorted by id.
 */
rd_kafka_Node_t *rd_kafka_Node_new_from_brokers(
    int32_t id,
    const struct rd_kafka_metadata_broker *brokers_sorted,
    const rd_kafka_metadata_broker_internal_t *brokers_internal,
    int broker_cnt) {
        rd_kafka_Node_t *node = rd_calloc(1, sizeof(*node));
        struct rd_kafka_metadata_broker key_sorted = {.id = id};
        rd_kafka_metadata_broker_internal_t key_internal = {.id = id};

        struct rd_kafka_metadata_broker *broker =
            bsearch(&key_sorted, brokers_sorted, broker_cnt,
                    sizeof(struct rd_kafka_metadata_broker),
                    rd_kafka_metadata_broker_cmp);

        rd_kafka_metadata_broker_internal_t *broker_internal =
            bsearch(&key_internal, brokers_internal, broker_cnt,
                    sizeof(rd_kafka_metadata_broker_internal_t),
                    rd_kafka_metadata_broker_internal_cmp);

        node->id = id;

        if (!broker)
                return node;

        node->host = rd_strdup(broker->host);
        node->port = broker->port;
        if (broker_internal && broker_internal->rack_id)
                node->rack = rd_strdup(broker_internal->rack_id);

        return node;
}
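
Both bsearch() calls above are only valid because the broker arrays are pre-sorted by id, as the @remark notes. A reduced sketch of the same sorted-array lookup (hypothetical broker struct and comparator, mirroring but not copied from the code above):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct broker {
        int32_t id;
        const char *host;
};

/* Three-way comparison by id, usable for both qsort() and bsearch(). */
static int broker_cmp(const void *a, const void *b) {
        const struct broker *ba = a, *bb = b;
        return (ba->id > bb->id) - (ba->id < bb->id);
}

int main(void) {
        /* Must already be sorted by id for bsearch() to be correct. */
        struct broker brokers[] = {{1, "b1.example.com"},
                                   {3, "b3.example.com"},
                                   {7, "b7.example.com"}};
        struct broker key = {.id = 3};
        struct broker *hit =
            bsearch(&key, brokers, 3, sizeof(brokers[0]), broker_cmp);
        printf("%s\n", hit ? hit->host : "(not found)");
        return 0;
}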

/**
 * @brief Copy \p src Node object
 *

@@ -297,26 +255,16 @@ rd_kafka_Node_t *rd_kafka_Node_new_from_brokers(
 *         Use rd_kafka_Node_destroy() to free when done.
 */
rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src) {
        return rd_kafka_Node_new(src->id, src->host, src->port, src->rack);
        return rd_kafka_Node_new(src->id, src->host, src->port, src->rack_id);
}

void rd_kafka_Node_destroy(rd_kafka_Node_t *node) {
        rd_free(node->host);
        if (node->rack)
                rd_free(node->rack);
        if (node->rack_id)
                rd_free(node->rack_id);
        rd_free(node);
}

/**
 * @brief Same as rd_kafka_Node_destroy, but for use as callback which accepts
 *        (void *) arguments.
 *
 * @param node
 */
void rd_kafka_Node_free(void *node) {
        rd_kafka_Node_destroy((rd_kafka_Node_t *)node);
}

int rd_kafka_Node_id(const rd_kafka_Node_t *node) {
        return node->id;
}

@@ -328,82 +276,3 @@ const char *rd_kafka_Node_host(const rd_kafka_Node_t *node) {
uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node) {
        return node->port;
}

const char *rd_kafka_Node_rack(const rd_kafka_Node_t *node) {
        return node->rack;
}

/**
 * @brief Creates a new rd_kafka_topic_partition_result_t object.
 */

rd_kafka_topic_partition_result_t *
rd_kafka_topic_partition_result_new(const char *topic,
                                    int32_t partition,
                                    rd_kafka_resp_err_t err,
                                    const char *errstr) {

        rd_kafka_topic_partition_result_t *new_result;

        new_result = rd_calloc(1, sizeof(*new_result));
        new_result->topic_partition =
            rd_kafka_topic_partition_new(topic, partition);
        new_result->topic_partition->err = err;
        new_result->error = rd_kafka_error_new(err, "%s", errstr);

        return new_result;
}

const rd_kafka_topic_partition_t *rd_kafka_topic_partition_result_partition(
    const rd_kafka_topic_partition_result_t *partition_result) {
        return partition_result->topic_partition;
}

const rd_kafka_error_t *rd_kafka_topic_partition_result_error(
    const rd_kafka_topic_partition_result_t *partition_result) {
        return partition_result->error;
}

/**
 * @brief Destroys the rd_kafka_topic_partition_result_t object.
 */
void rd_kafka_topic_partition_result_destroy(
    rd_kafka_topic_partition_result_t *partition_result) {
        rd_kafka_topic_partition_destroy(partition_result->topic_partition);
        rd_kafka_error_destroy(partition_result->error);
        rd_free(partition_result);
}

/**
 * @brief Destroys the array of rd_kafka_topic_partition_result_t objects.
 */
void rd_kafka_topic_partition_result_destroy_array(
    rd_kafka_topic_partition_result_t **partition_results,
    int32_t partition_results_cnt) {
        int32_t i;
        for (i = 0; i < partition_results_cnt; i++) {
                rd_kafka_topic_partition_result_destroy(partition_results[i]);
        }
}

rd_kafka_topic_partition_result_t *rd_kafka_topic_partition_result_copy(
    const rd_kafka_topic_partition_result_t *src) {
        return rd_kafka_topic_partition_result_new(
            src->topic_partition->topic, src->topic_partition->partition,
            src->topic_partition->err, src->error->errstr);
}

void *rd_kafka_topic_partition_result_copy_opaque(const void *src,
                                                  void *opaque) {
        return rd_kafka_topic_partition_result_copy(
            (const rd_kafka_topic_partition_result_t *)src);
}

/**
 * @brief Frees the memory allocated for a
 *        topic partition result object by calling
 *        its destroy function.
 */
void rd_kafka_topic_partition_result_free(void *ptr) {
        rd_kafka_topic_partition_result_destroy(ptr);
}

@@ -1,8 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * Copyright (c) 2018 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -108,67 +107,14 @@ typedef struct rd_kafka_Node_s {
        int id;        /*< Node id */
        char *host;    /*< Node host */
        uint16_t port; /*< Node port */
        char *rack;    /*< (optional) Node rack id */
        char *rack_id; /*< (optional) Node rack id */
} rd_kafka_Node_t;

rd_kafka_Node_t *rd_kafka_Node_new(int32_t id,
                                   const char *host,
                                   uint16_t port,
                                   const char *rack_id);

rd_kafka_Node_t *rd_kafka_Node_new_from_brokers(
    int32_t id,
    const struct rd_kafka_metadata_broker *brokers_sorted,
    const rd_kafka_metadata_broker_internal_t *brokers_internal,
    int broker_cnt);
rd_kafka_Node_t *
rd_kafka_Node_new(int id, const char *host, uint16_t port, const char *rack_id);

rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src);

void rd_kafka_Node_destroy(rd_kafka_Node_t *node);

void rd_kafka_Node_free(void *node);

/**
 * @brief Represents a topic partition result.
 *
 * @remark Public Type
 */
struct rd_kafka_topic_partition_result_s {
        rd_kafka_topic_partition_t *topic_partition;
        rd_kafka_error_t *error;
};

/**
 * @brief Create a new rd_kafka_topic_partition_result_t object.
 *
 * @param topic The topic name.
 * @param partition The partition number.
 * @param err The error code.
 * @param errstr The error string.
 *
 * @returns a newly allocated rd_kafka_topic_partition_result_t object.
 *          Use rd_kafka_topic_partition_result_destroy() to free object when
 *          done.
 */
rd_kafka_topic_partition_result_t *
rd_kafka_topic_partition_result_new(const char *topic,
                                    int32_t partition,
                                    rd_kafka_resp_err_t err,
                                    const char *errstr);

rd_kafka_topic_partition_result_t *rd_kafka_topic_partition_result_copy(
    const rd_kafka_topic_partition_result_t *src);

void *rd_kafka_topic_partition_result_copy_opaque(const void *src,
                                                  void *opaque);

void rd_kafka_topic_partition_result_destroy(
    rd_kafka_topic_partition_result_t *partition_result);

void rd_kafka_topic_partition_result_destroy_array(
    rd_kafka_topic_partition_result_t **partition_results,
    int32_t partition_results_cnt);

void rd_kafka_topic_partition_result_free(void *ptr);

#endif /* _RDKAFKA_AUX_H_ */

@@ -1,7 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2018-2022, Magnus Edenhill
 * Copyright (c) 2018 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -1,8 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * Copyright (c) 2012-2015, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -50,7 +49,6 @@
#include <ctype.h>

#include "rd.h"
#include "rdaddr.h"
#include "rdkafka_int.h"
#include "rdkafka_msg.h"
#include "rdkafka_msgset.h"

@@ -58,7 +56,6 @@
#include "rdkafka_partition.h"
#include "rdkafka_broker.h"
#include "rdkafka_offset.h"
#include "rdkafka_telemetry.h"
#include "rdkafka_transport.h"
#include "rdkafka_proto.h"
#include "rdkafka_buf.h"

@@ -82,9 +79,9 @@
static const int rd_kafka_max_block_ms = 1000;

const char *rd_kafka_broker_state_names[] = {
    "INIT", "DOWN", "TRY_CONNECT", "CONNECT", "SSL_HANDSHAKE",
    "AUTH_LEGACY", "UP", "UPDATE", "APIVERSION_QUERY", "AUTH_HANDSHAKE",
    "AUTH_REQ", "REAUTH"};
    "INIT", "DOWN", "TRY_CONNECT", "CONNECT", "SSL_HANDSHAKE",
    "AUTH_LEGACY", "UP", "UPDATE", "APIVERSION_QUERY", "AUTH_HANDSHAKE",
    "AUTH_REQ"};

const char *rd_kafka_secproto_names[] = {
    [RD_KAFKA_PROTO_PLAINTEXT] = "plaintext",

@@ -235,37 +232,31 @@ static void rd_kafka_broker_features_set(rd_kafka_broker_t *rkb, int features) {
                     rd_kafka_features2str(rkb->rkb_features));
}


/**
 * @brief Check and return supported ApiVersion for \p ApiKey.
 *
 * @returns the highest supported ApiVersion in the specified range (inclusive)
 *          or -1 if the ApiKey is not supported or no matching ApiVersion.
 *          The current feature set is also returned in \p featuresp
 *
 * @remark Same as rd_kafka_broker_ApiVersion_supported except for locking.
 *
 * @locks rd_kafka_broker_lock() if do_lock is rd_false
 * @locks_acquired rd_kafka_broker_lock() if do_lock is rd_true
 * @locks none
 * @locality any
 */
int16_t rd_kafka_broker_ApiVersion_supported0(rd_kafka_broker_t *rkb,
                                              int16_t ApiKey,
                                              int16_t minver,
                                              int16_t maxver,
                                              int *featuresp,
                                              rd_bool_t do_lock) {
int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb,
                                             int16_t ApiKey,
                                             int16_t minver,
                                             int16_t maxver,
                                             int *featuresp) {
        struct rd_kafka_ApiVersion skel = {.ApiKey = ApiKey};
        struct rd_kafka_ApiVersion ret = RD_ZERO_INIT, *retp;

        if (do_lock)
                rd_kafka_broker_lock(rkb);
        rd_kafka_broker_lock(rkb);
        if (featuresp)
                *featuresp = rkb->rkb_features;

        if (rkb->rkb_features & RD_KAFKA_FEATURE_UNITTEST) {
                /* For unit tests let the broker support everything. */
                if (do_lock)
                        rd_kafka_broker_unlock(rkb);
                rd_kafka_broker_unlock(rkb);
                return maxver;
        }

@@ -274,9 +265,7 @@ int16_t rd_kafka_broker_ApiVersion_supported0(rd_kafka_broker_t *rkb,
            sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp);
        if (retp)
                ret = *retp;

        if (do_lock)
                rd_kafka_broker_unlock(rkb);
        rd_kafka_broker_unlock(rkb);

        if (!retp)
                return -1;

@@ -292,24 +281,6 @@ int16_t rd_kafka_broker_ApiVersion_supported0(rd_kafka_broker_t *rkb,
        return maxver;
}

/**
 * @brief Check and return supported ApiVersion for \p ApiKey.
 *
 * @returns the highest supported ApiVersion in the specified range (inclusive)
 *          or -1 if the ApiKey is not supported or no matching ApiVersion.
 *          The current feature set is also returned in \p featuresp
 * @locks none
 * @locks_acquired rd_kafka_broker_lock()
 * @locality any
 */
int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb,
                                             int16_t ApiKey,
                                             int16_t minver,
                                             int16_t maxver,
                                             int *featuresp) {
        return rd_kafka_broker_ApiVersion_supported0(
            rkb, ApiKey, minver, maxver, featuresp, rd_true /* do_lock */);
}
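
The negotiation these functions implement boils down to intersecting two inclusive version ranges, the caller's [minver, maxver] and the broker's advertised range for the ApiKey, and taking the highest version in the intersection, or -1 if it is empty. A minimal sketch of that arithmetic (an assumption-level illustration, not an excerpt):

#include <stdio.h>

/* Highest version in the intersection of [min_a, max_a] and [min_b, max_b],
 * or -1 if the ranges do not overlap. */
static int highest_common_version(int min_a, int max_a, int min_b, int max_b) {
        int lo = min_a > min_b ? min_a : min_b;
        int hi = max_a < max_b ? max_a : max_b;
        return lo <= hi ? hi : -1;
}

int main(void) {
        /* Caller supports v0..v7, broker advertises v3..v11: use v7. */
        printf("%d\n", highest_common_version(0, 7, 3, 11));
        /* Disjoint ranges: ApiKey effectively unsupported. */
        printf("%d\n", highest_common_version(0, 2, 5, 9));
        return 0;
}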

/**
 * @brief Set broker state.

@@ -583,7 +554,6 @@ void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
        va_list ap;
        rd_kafka_bufq_t tmpq_waitresp, tmpq;
        int old_state;
        rd_kafka_toppar_t *rktp;

        rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));

@@ -602,8 +572,6 @@ void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
                rkb->rkb_recv_buf = NULL;
        }

        rkb->rkb_reauth_in_progress = rd_false;

        va_start(ap, fmt);
        rd_kafka_broker_set_error(rkb, level, err, fmt, ap);
        va_end(ap);

@@ -622,11 +590,6 @@ void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
        old_state = rkb->rkb_state;
        rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_DOWN);

        /* Stop any pending reauth timer, since a teardown/reconnect will
         * require a new timer. */
        rd_kafka_timer_stop(&rkb->rkb_rk->rk_timers, &rkb->rkb_sasl_reauth_tmr,
                            1 /*lock*/);

        /* Unlock broker since a requeue will try to lock it. */
        rd_kafka_broker_unlock(rkb);

@@ -678,35 +641,6 @@ void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
                rd_kafka_bufq_dump(rkb, "BRKOUTBUFS", &rkb->rkb_outbufs);
        }

        /* If this broker acts as the preferred (follower) replica for any
         * partition, delegate the partition back to the leader. */
        TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
                rd_kafka_toppar_lock(rktp);
                if (unlikely(rktp->rktp_broker != rkb)) {
                        /* Currently migrating away from this
                         * broker, skip. */
                        rd_kafka_toppar_unlock(rktp);
                        continue;
                }
                rd_kafka_toppar_unlock(rktp);

                if (rktp->rktp_leader_id != rktp->rktp_broker_id) {
                        rd_kafka_toppar_delegate_to_leader(rktp);
                }
        }

        /* If the broker is the preferred telemetry broker, remove it. */
        /* TODO(milind): check if this is right. */
        mtx_lock(&rkb->rkb_rk->rk_telemetry.lock);
        if (rkb->rkb_rk->rk_telemetry.preferred_broker == rkb) {
                rd_kafka_dbg(rkb->rkb_rk, TELEMETRY, "TELBRKLOST",
                             "Lost telemetry broker %s due to state change",
                             rkb->rkb_name);
                rd_kafka_broker_destroy(
                    rkb->rkb_rk->rk_telemetry.preferred_broker);
                rkb->rkb_rk->rk_telemetry.preferred_broker = NULL;
        }
        mtx_unlock(&rkb->rkb_rk->rk_telemetry.lock);

        /* Query for topic leaders to quickly pick up on failover. */
        if (err != RD_KAFKA_RESP_ERR__DESTROY &&

@@ -981,22 +915,11 @@ static void rd_kafka_broker_timeout_scan(rd_kafka_broker_t *rkb, rd_ts_t now) {
                        char rttinfo[32];
                        /* Print average RTT (if avail) to help diagnose. */
                        rd_avg_calc(&rkb->rkb_avg_rtt, now);
                        rd_avg_calc(
                            &rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt,
                            now);
                        if (rkb->rkb_avg_rtt.ra_v.avg)
                                rd_snprintf(rttinfo, sizeof(rttinfo),
                                            " (average rtt %.3fms)",
                                            (float)(rkb->rkb_avg_rtt.ra_v.avg /
                                                    1000.0f));
                        else if (rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt
                                     .ra_v.avg)
                                rd_snprintf(
                                    rttinfo, sizeof(rttinfo),
                                    " (average rtt %.3fms)",
                                    (float)(rkb->rkb_telemetry.rd_avg_current
                                                .rkb_avg_rtt.ra_v.avg /
                                            1000.0f));
                        else
                                rttinfo[0] = 0;
                        rd_kafka_broker_fail(rkb, LOG_ERR,

@@ -1389,15 +1312,15 @@ void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk) {
 * @locks rd_kafka_*lock() MUST be held
 * @locality any
 */
rd_kafka_broker_t *rd_kafka_broker_random0(const char *func,
                                           int line,
                                           rd_kafka_t *rk,
                                           rd_bool_t is_up,
                                           int state,
                                           int *filtered_cnt,
                                           int (*filter)(rd_kafka_broker_t *rk,
                                                         void *opaque),
                                           void *opaque) {
static rd_kafka_broker_t *
rd_kafka_broker_random0(const char *func,
                        int line,
                        rd_kafka_t *rk,
                        rd_bool_t is_up,
                        int state,
                        int *filtered_cnt,
                        int (*filter)(rd_kafka_broker_t *rk, void *opaque),
                        void *opaque) {
        rd_kafka_broker_t *rkb, *good = NULL;
        int cnt = 0;
        int fcnt = 0;

@@ -1432,6 +1355,11 @@ rd_kafka_broker_t *rd_kafka_broker_random0(const char *func,
        return good;
}

#define rd_kafka_broker_random(rk, state, filter, opaque)                      \
        rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_false, state,   \
                                NULL, filter, opaque)


/**
 * @returns the broker (with refcnt increased) with the highest weight based
 *          on the provided weighing function.

@@ -1871,32 +1799,6 @@ static rd_kafka_buf_t *rd_kafka_waitresp_find(rd_kafka_broker_t *rkb,
                /* Convert ts_sent to RTT */
                rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent;
                rd_avg_add(&rkb->rkb_avg_rtt, rkbuf->rkbuf_ts_sent);
                rd_avg_add(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt,
                           rkbuf->rkbuf_ts_sent);

                switch (rkbuf->rkbuf_reqhdr.ApiKey) {
                case RD_KAFKAP_Fetch:
                        if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER)
                                rd_avg_add(&rkb->rkb_telemetry.rd_avg_current
                                                .rkb_avg_fetch_latency,
                                           rkbuf->rkbuf_ts_sent);
                        break;
                case RD_KAFKAP_OffsetCommit:
                        if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER)
                                rd_avg_add(
                                    &rkb->rkb_rk->rk_telemetry.rd_avg_current
                                         .rk_avg_commit_latency,
                                    rkbuf->rkbuf_ts_sent);
                        break;
                case RD_KAFKAP_Produce:
                        if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER)
                                rd_avg_add(&rkb->rkb_telemetry.rd_avg_current
                                                .rkb_avg_produce_latency,
                                           rkbuf->rkbuf_ts_sent);
                        break;
                default:
                        break;
                }

                if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
                    rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 1)

@@ -1915,7 +1817,7 @@ static rd_kafka_buf_t *rd_kafka_waitresp_find(rd_kafka_broker_t *rkb,
 */
static int rd_kafka_req_response(rd_kafka_broker_t *rkb,
                                 rd_kafka_buf_t *rkbuf) {
        rd_kafka_buf_t *req = NULL;
        rd_kafka_buf_t *req;
        int log_decode_errors = LOG_ERR;

        rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));

@@ -2317,10 +2219,8 @@ static int rd_kafka_broker_connect(rd_kafka_broker_t *rkb) {
 * @locality Broker thread
 */
void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb) {
        int features;

        rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight;
        rkb->rkb_reauth_in_progress = rd_false;
        rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight;

        rd_kafka_broker_lock(rkb);
        rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP);

@@ -2333,18 +2233,6 @@ void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb) {
                NULL, rkb, rd_false /*dont force*/, "connected") ==
            RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
                rd_kafka_metadata_refresh_brokers(NULL, rkb, "connected");

        if (rd_kafka_broker_ApiVersion_supported(
                rkb, RD_KAFKAP_GetTelemetrySubscriptions, 0, 0, &features) !=
                -1 &&
            rkb->rkb_rk->rk_conf.enable_metrics_push) {
                rd_kafka_t *rk = rkb->rkb_rk;
                rd_kafka_op_t *rko =
                    rd_kafka_op_new(RD_KAFKA_OP_SET_TELEMETRY_BROKER);
                rd_kafka_broker_keep(rkb);
                rko->rko_u.telemetry_broker.rkb = rkb;
                rd_kafka_q_enq(rk->rk_ops, rko);
        }
}


@@ -2883,10 +2771,6 @@ int rd_kafka_send(rd_kafka_broker_t *rkb) {
                /* Add to outbuf_latency averager */
                rd_avg_add(&rkb->rkb_avg_outbuf_latency,
                           rkbuf->rkbuf_ts_sent - rkbuf->rkbuf_ts_enq);
                rd_avg_add(
                    &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency,
                    rkbuf->rkbuf_ts_sent - rkbuf->rkbuf_ts_enq);


                if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
                    rd_atomic32_add(&rkb->rkb_blocking_request_cnt, 1) == 1)

@@ -2912,7 +2796,6 @@ int rd_kafka_send(rd_kafka_broker_t *rkb) {
 */
void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {

        int64_t backoff = 0;
        /* Restore original replyq since replyq.q will have been NULLed
         * by buf_callback()/replyq_enq(). */
        if (!rkbuf->rkbuf_replyq.q && rkbuf->rkbuf_orig_replyq.q) {

@@ -2940,24 +2823,9 @@ void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
                   rkb->rkb_rk->rk_conf.retry_backoff_ms);

        rd_atomic64_add(&rkb->rkb_c.tx_retries, 1);
        /* In some cases, failed Produce requests do not increment the retry
         * count, see rd_kafka_handle_Produce_error. */
        if (rkbuf->rkbuf_retries > 0)
                backoff = (1 << (rkbuf->rkbuf_retries - 1)) *
                          (rkb->rkb_rk->rk_conf.retry_backoff_ms);
        else
                backoff = rkb->rkb_rk->rk_conf.retry_backoff_ms;

        /* We are multiplying by 10 as (backoff_ms * percent * 1000)/100 ->
         * backoff_ms * jitter * 10 */
        backoff = rd_jitter(100 - RD_KAFKA_RETRY_JITTER_PERCENT,
                            100 + RD_KAFKA_RETRY_JITTER_PERCENT) *
                  backoff * 10;

        if (backoff > rkb->rkb_rk->rk_conf.retry_backoff_max_ms * 1000)
                backoff = rkb->rkb_rk->rk_conf.retry_backoff_max_ms * 1000;

        rkbuf->rkbuf_ts_retry = rd_clock() + backoff;
        rkbuf->rkbuf_ts_retry =
            rd_clock() + (rkb->rkb_rk->rk_conf.retry_backoff_ms * 1000);
        /* Precaution: time out the request if it hasn't moved from the
         * retry queue within the retry interval (such as when the broker is
         * down). */
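
The removed backoff block rewards a worked example: with retry_backoff_ms=100 and rkbuf_retries=3 the base backoff is (1 << 2) * 100 = 400 ms; rd_jitter() then yields a percentage in [80, 120], and the final multiply by 10 converts ms x percent to microseconds (x1000 for ms to us, /100 for the percentage), giving a scheduled delay between 320,000 and 480,000 us before the retry_backoff_max_ms * 1000 cap. A standalone sketch of the same computation, with rand() standing in for rd_jitter():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RETRY_JITTER_PERCENT 20

/* Microsecond backoff for the nth retry, jittered and capped, mirroring
 * the ms * percent * 10 -> microseconds scaling used above. */
static int64_t retry_backoff_us(int retries, int backoff_ms,
                                int backoff_max_ms) {
        int64_t backoff = retries > 0
                              ? (int64_t)(1 << (retries - 1)) * backoff_ms
                              : backoff_ms;
        int jitter = 100 - RETRY_JITTER_PERCENT +
                     rand() % (2 * RETRY_JITTER_PERCENT + 1); /* 80..120 */
        backoff = jitter * backoff * 10;
        if (backoff > (int64_t)backoff_max_ms * 1000)
                backoff = (int64_t)backoff_max_ms * 1000;
        return backoff;
}

int main(void) {
        printf("%lld us\n", (long long)retry_backoff_us(3, 100, 1000));
        return 0;
}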

@@ -3010,10 +2878,9 @@ static void rd_kafka_broker_retry_bufs_move(rd_kafka_broker_t *rkb,
 * To avoid extra iterations, the \p err and \p status are set on
 * the message as they are popped off the OP_DR msgq in rd_kafka_poll() et.al
 */
void rd_kafka_dr_msgq0(rd_kafka_topic_t *rkt,
                       rd_kafka_msgq_t *rkmq,
                       rd_kafka_resp_err_t err,
                       const rd_kafka_Produce_result_t *presult) {
void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt,
                      rd_kafka_msgq_t *rkmq,
                      rd_kafka_resp_err_t err) {
        rd_kafka_t *rk = rkt->rkt_rk;

        if (unlikely(rd_kafka_msgq_len(rkmq) == 0))

@@ -3024,11 +2891,7 @@ void rd_kafka_dr_msgq0(rd_kafka_topic_t *rkt,
                   rd_kafka_msgq_len(rkmq));

        /* Call on_acknowledgement() interceptors */
        rd_kafka_interceptors_on_acknowledgement_queue(
            rk, rkmq,
            (presult && presult->record_errors_cnt > 1)
                ? RD_KAFKA_RESP_ERR_NO_ERROR
                : err);
        rd_kafka_interceptors_on_acknowledgement_queue(rk, rkmq, err);

        if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE &&
            (!rk->rk_conf.dr_err_only || err)) {

@@ -3038,9 +2901,6 @@ void rd_kafka_dr_msgq0(rd_kafka_topic_t *rkt,
                rko = rd_kafka_op_new(RD_KAFKA_OP_DR);
                rko->rko_err = err;
                rko->rko_u.dr.rkt = rd_kafka_topic_keep(rkt);
                if (presult)
                        rko->rko_u.dr.presult =
                            rd_kafka_Produce_result_copy(presult);
                rd_kafka_msgq_init(&rko->rko_u.dr.msgq);

                /* Move all messages to op's msgq */

@@ -3574,20 +3434,6 @@ rd_kafka_broker_op_serve(rd_kafka_broker_t *rkb, rd_kafka_op_t *rko) {
                wakeup = rd_true;
                break;

        case RD_KAFKA_OP_SASL_REAUTH:
                rd_rkb_dbg(rkb, BROKER, "REAUTH", "Received REAUTH op");

                /* We don't need a lock for rkb_max_inflight. It's changed only
                 * on the broker thread. */
                rkb->rkb_max_inflight = 1;

                rd_kafka_broker_lock(rkb);
                rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_REAUTH);
                rd_kafka_broker_unlock(rkb);

                wakeup = rd_true;
                break;

        default:
                rd_kafka_assert(rkb->rkb_rk, !*"unhandled op type");
                break;

@@ -4665,15 +4511,8 @@ static int rd_kafka_broker_thread_main(void *arg) {
                            rd_kafka_broker_addresses_exhausted(rkb))
                                rd_kafka_broker_update_reconnect_backoff(
                                    rkb, &rkb->rkb_rk->rk_conf, rd_clock());
                        /* If we haven't made progress from the last state, and
                         * if we have exceeded
                         * socket_connection_setup_timeout_ms, then error out.
                         * Don't error out in case this is a reauth, for which
                         * socket_connection_setup_timeout_ms is not
                         * applicable. */
                        else if (
                            rkb->rkb_state == orig_state &&
                            !rkb->rkb_reauth_in_progress &&
                            rd_clock() >=
                                (rkb->rkb_ts_connect +
                                 (rd_ts_t)rk->rk_conf

@@ -4688,22 +4527,6 @@ static int rd_kafka_broker_thread_main(void *arg) {

                        break;

                case RD_KAFKA_BROKER_STATE_REAUTH:
                        /* Since we've already authenticated once, the provider
                         * should be ready. */
                        rd_assert(rd_kafka_sasl_ready(rkb->rkb_rk));

                        /* Since we aren't disconnecting, the transport isn't
                         * destroyed, and as a consequence, some of the SASL
                         * state leaks unless we destroy it before the reauth.
                         */
                        rd_kafka_sasl_close(rkb->rkb_transport);

                        rkb->rkb_reauth_in_progress = rd_true;

                        rd_kafka_broker_connect_auth(rkb);
                        break;

                case RD_KAFKA_BROKER_STATE_UPDATE:
                        /* FALLTHRU */
                case RD_KAFKA_BROKER_STATE_UP:

@@ -4825,27 +4648,6 @@ void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb) {
        rd_avg_destroy(&rkb->rkb_avg_outbuf_latency);
        rd_avg_destroy(&rkb->rkb_avg_rtt);
        rd_avg_destroy(&rkb->rkb_avg_throttle);
        rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt);
        rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt);
        rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle);
        rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle);
        rd_avg_destroy(
            &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency);
        rd_avg_destroy(
            &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency);

        if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER) {
                rd_avg_destroy(
                    &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_fetch_latency);
                rd_avg_destroy(
                    &rkb->rkb_telemetry.rd_avg_current.rkb_avg_fetch_latency);
        } else if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) {
                rd_avg_destroy(
                    &rkb->rkb_telemetry.rd_avg_current.rkb_avg_produce_latency);
                rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover
                                    .rkb_avg_produce_latency);
        }


        mtx_lock(&rkb->rkb_logname_lock);
        rd_free(rkb->rkb_logname);

@@ -4853,9 +4655,6 @@ void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb) {
        mtx_unlock(&rkb->rkb_logname_lock);
        mtx_destroy(&rkb->rkb_logname_lock);

        rd_kafka_timer_stop(&rkb->rkb_rk->rk_timers, &rkb->rkb_sasl_reauth_tmr,
                            1 /*lock*/);

        mtx_destroy(&rkb->rkb_lock);

        rd_refcnt_destroy(&rkb->rkb_refcnt);

@@ -4933,50 +4732,13 @@ rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk,
        rd_kafka_bufq_init(&rkb->rkb_retrybufs);
        rkb->rkb_ops = rd_kafka_q_new(rk);
        rd_avg_init(&rkb->rkb_avg_int_latency, RD_AVG_GAUGE, 0, 100 * 1000, 2,
                    rk->rk_conf.stats_interval_ms);
                    rk->rk_conf.stats_interval_ms ? 1 : 0);
        rd_avg_init(&rkb->rkb_avg_outbuf_latency, RD_AVG_GAUGE, 0, 100 * 1000,
                    2, rk->rk_conf.stats_interval_ms);
                    2, rk->rk_conf.stats_interval_ms ? 1 : 0);
        rd_avg_init(&rkb->rkb_avg_rtt, RD_AVG_GAUGE, 0, 500 * 1000, 2,
                    rk->rk_conf.stats_interval_ms);
                    rk->rk_conf.stats_interval_ms ? 1 : 0);
        rd_avg_init(&rkb->rkb_avg_throttle, RD_AVG_GAUGE, 0, 5000 * 1000, 2,
                    rk->rk_conf.stats_interval_ms);
        rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt,
                    RD_AVG_GAUGE, 0, 500 * 1000, 2,
                    rk->rk_conf.enable_metrics_push);
        rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt,
                    RD_AVG_GAUGE, 0, 500 * 1000, 2,
                    rk->rk_conf.enable_metrics_push);
        rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle,
                    RD_AVG_GAUGE, 0, 5000 * 1000, 2,
                    rk->rk_conf.enable_metrics_push);
        rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle,
                    RD_AVG_GAUGE, 0, 5000 * 1000, 2,
                    rk->rk_conf.enable_metrics_push);
        rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency,
                    RD_AVG_GAUGE, 0, 100 * 1000, 2,
                    rk->rk_conf.enable_metrics_push);
        rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency,
                    RD_AVG_GAUGE, 0, 100 * 1000, 2,
                    rk->rk_conf.enable_metrics_push);

        if (rk->rk_type == RD_KAFKA_CONSUMER) {
                rd_avg_init(
                    &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_fetch_latency,
                    RD_AVG_GAUGE, 0, 500 * 1000, 2,
                    rk->rk_conf.enable_metrics_push);
                rd_avg_init(
                    &rkb->rkb_telemetry.rd_avg_current.rkb_avg_fetch_latency,
                    RD_AVG_GAUGE, 0, 500 * 1000, 2,
                    rk->rk_conf.enable_metrics_push);
        } else if (rk->rk_type == RD_KAFKA_PRODUCER) {
                rd_avg_init(
                    &rkb->rkb_telemetry.rd_avg_current.rkb_avg_produce_latency,
                    RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true);
                rd_avg_init(
                    &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_produce_latency,
                    RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true);
        }

                    rk->rk_conf.stats_interval_ms ? 1 : 0);
        rd_refcnt_init(&rkb->rkb_refcnt, 0);
        rd_kafka_broker_keep(rkb); /* rk_broker's refcount */

@@ -5429,31 +5191,6 @@ static int rd_kafka_broker_name_parse(rd_kafka_t *rk,
        return 0;
}

/**
 * @brief Add a broker from a string of type "[proto://]host[:port]" to the list
 * of brokers. *cnt is increased by one if a broker was added, else not.
 */
static void rd_kafka_find_or_add_broker(rd_kafka_t *rk,
                                        rd_kafka_secproto_t proto,
                                        const char *host,
                                        uint16_t port,
                                        int *cnt) {
        rd_kafka_broker_t *rkb = NULL;

        if ((rkb = rd_kafka_broker_find(rk, proto, host, port)) &&
            rkb->rkb_source == RD_KAFKA_CONFIGURED) {
                (*cnt)++;
        } else if (rd_kafka_broker_add(rk, RD_KAFKA_CONFIGURED, proto, host,
                                       port, RD_KAFKA_NODEID_UA) != NULL)
                (*cnt)++;

        /* If rd_kafka_broker_find returned a broker its
         * reference needs to be released
         * See issue #193 */
        if (rkb)
                rd_kafka_broker_destroy(rkb);
}

/**
 * @brief Adds a (csv list of) broker(s).
 * Returns the number of brokers successfully added.

@@ -5461,22 +5198,17 @@ static void rd_kafka_find_or_add_broker(rd_kafka_t *rk,
 * @locality any thread
 * @locks none
 */
int rd_kafka_brokers_add0(rd_kafka_t *rk,
                          const char *brokerlist,
                          rd_bool_t is_bootstrap_server_list) {
int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist) {
        char *s_copy = rd_strdup(brokerlist);
        char *s = s_copy;
        int cnt = 0;
        int pre_cnt = rd_atomic32_get(&rk->rk_broker_cnt);
        rd_sockaddr_inx_t *sinx;
        rd_sockaddr_list_t *sockaddr_list;
        rd_kafka_broker_t *rkb;
        int pre_cnt = rd_atomic32_get(&rk->rk_broker_cnt);

        /* Parse comma-separated list of brokers. */
        while (*s) {
                uint16_t port;
                const char *host;
                const char *err_str;
                const char *resolved_FQDN;
                rd_kafka_secproto_t proto;

                if (*s == ',' || *s == ' ') {

@@ -5489,43 +5221,20 @@ int rd_kafka_brokers_add0(rd_kafka_t *rk,
                        break;

                rd_kafka_wrlock(rk);
                if (is_bootstrap_server_list &&
                    rk->rk_conf.client_dns_lookup ==
                        RD_KAFKA_RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY) {
                        rd_kafka_dbg(rk, ALL, "INIT",
                                     "Canonicalizing bootstrap broker %s:%d",
                                     host, port);
                        sockaddr_list = rd_getaddrinfo(
                            host, RD_KAFKA_PORT_STR, AI_ADDRCONFIG,
                            rk->rk_conf.broker_addr_family, SOCK_STREAM,
                            IPPROTO_TCP, rk->rk_conf.resolve_cb,
                            rk->rk_conf.opaque, &err_str);

                        if (!sockaddr_list) {
                                rd_kafka_log(rk, LOG_WARNING, "BROKER",
                                             "Failed to resolve '%s': %s", host,
                                             err_str);
                                rd_kafka_wrunlock(rk);
                                continue;
                        }
                if ((rkb = rd_kafka_broker_find(rk, proto, host, port)) &&
                    rkb->rkb_source == RD_KAFKA_CONFIGURED) {
                        cnt++;
                } else if (rd_kafka_broker_add(rk, RD_KAFKA_CONFIGURED, proto,
                                               host, port,
                                               RD_KAFKA_NODEID_UA) != NULL)
                        cnt++;

                        RD_SOCKADDR_LIST_FOREACH(sinx, sockaddr_list) {
                                resolved_FQDN = rd_sockaddr2str(
                                    sinx, RD_SOCKADDR2STR_F_RESOLVE);
                                rd_kafka_dbg(
                                    rk, ALL, "INIT",
                                    "Adding broker with resolved hostname %s",
                                    resolved_FQDN);

                                rd_kafka_find_or_add_broker(
                                    rk, proto, resolved_FQDN, port, &cnt);
                        };

                        rd_sockaddr_list_destroy(sockaddr_list);
                } else {
                        rd_kafka_find_or_add_broker(rk, proto, host, port,
                                                    &cnt);
                }
                /* If rd_kafka_broker_find returned a broker its
                 * reference needs to be released
                 * See issue #193 */
                if (rkb)
                        rd_kafka_broker_destroy(rkb);

                rd_kafka_wrunlock(rk);
        }
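
For context, the removed branch above is what implements client.dns.lookup=resolve_canonical_bootstrap_servers_only: every bootstrap name is resolved up front and each resolved canonical hostname is added as its own configured broker (chiefly so SASL/Kerberos principals match the canonical host). A minimal sketch of enabling it through the public config API, assuming a librdkafka build that still carries this code path; the hostname is a placeholder:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "bootstrap.servers",
                              "kafka.example.com:9092", /* placeholder host */
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "client.dns.lookup",
                              "resolve_canonical_bootstrap_servers_only",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return 1;
        }

        /* rd_kafka_new() takes ownership of conf on success. */
        rd_kafka_t *rk =
            rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "%s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return 1;
        }
        rd_kafka_destroy(rk);
        return 0;
}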

@@ -5547,7 +5256,7 @@ int rd_kafka_brokers_add0(rd_kafka_t *rk,


int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist) {
        return rd_kafka_brokers_add0(rk, brokerlist, rd_false);
        return rd_kafka_brokers_add0(rk, brokerlist);
}


@@ -6125,46 +5834,6 @@ void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon) {
        rd_kafka_broker_destroy(rkb);
}

/**
 * @brief Starts the reauth timer for this broker.
 *        If connections_max_reauth_ms=0, then no timer is set.
 *
 * @locks none
 * @locality broker thread
 */
void rd_kafka_broker_start_reauth_timer(rd_kafka_broker_t *rkb,
                                        int64_t connections_max_reauth_ms) {
        /* Timer should not already be started. It indicates that we're about to
         * schedule an extra reauth, but this shouldn't be a cause for failure
         * in production use cases, so, clear the timer. */
        if (rd_kafka_timer_is_started(&rkb->rkb_rk->rk_timers,
                                      &rkb->rkb_sasl_reauth_tmr))
                rd_kafka_timer_stop(&rkb->rkb_rk->rk_timers,
                                    &rkb->rkb_sasl_reauth_tmr, 1 /*lock*/);

        if (connections_max_reauth_ms == 0)
                return;

        rd_kafka_timer_start_oneshot(
            &rkb->rkb_rk->rk_timers, &rkb->rkb_sasl_reauth_tmr, rd_false,
            connections_max_reauth_ms * 900 /* 90% * microsecond*/,
            rd_kafka_broker_start_reauth_cb, (void *)rkb);
}
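
The multiplier encodes two conversions at once: rd_kafka timers run on microseconds, and the client deliberately re-authenticates before the broker-enforced deadline, so connections_max_reauth_ms * 900 equals ms * 1000 us * 0.9, i.e. the one-shot timer fires at 90% of connections.max.reauth.ms. A worked example of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void) {
        /* Broker-side connections.max.reauth.ms, example value. */
        int64_t connections_max_reauth_ms = 60000;
        /* 90% of the deadline, in microseconds: ms * 1000 * 0.9 == ms * 900. */
        int64_t timer_us = connections_max_reauth_ms * 900;
        printf("%lld us\n", (long long)timer_us); /* 54000000 us = 54 s */
        return 0;
}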

/**
 * @brief Starts the reauth process for the broker rkb.
 *
 * @locks none
 * @locality main thread
 */
void rd_kafka_broker_start_reauth_cb(rd_kafka_timers_t *rkts, void *_rkb) {
        rd_kafka_op_t *rko = NULL;
        rd_kafka_broker_t *rkb = (rd_kafka_broker_t *)_rkb;
        rd_dassert(rkb);
        rko = rd_kafka_op_new(RD_KAFKA_OP_SASL_REAUTH);
        rd_kafka_q_enq(rkb->rkb_ops, rko);
}

/**
 * @name Unit tests
 * @{

@@ -1,8 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012,2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * Copyright (c) 2012,2013 Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -55,7 +54,6 @@ typedef enum {
        RD_KAFKA_BROKER_STATE_APIVERSION_QUERY,
        RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE,
        RD_KAFKA_BROKER_STATE_AUTH_REQ,
        RD_KAFKA_BROKER_STATE_REAUTH,
} rd_kafka_broker_state_t;

/**

@@ -193,40 +191,6 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
        rd_atomic64_t ts_recv; /**< Timestamp of last receive */
        } rkb_c;

        struct {
                struct {
                        int32_t connects; /**< Connection attempts,
                                           *   successful or not. */
                } rkb_historic_c;

                struct {
                        rd_avg_t rkb_avg_rtt;      /* Current RTT avg */
                        rd_avg_t rkb_avg_throttle; /* Current throttle avg */
                        rd_avg_t
                            rkb_avg_outbuf_latency; /**< Current latency
                                                     *   between buf_enq0
                                                     *   and writing to socket
                                                     */
                        rd_avg_t rkb_avg_fetch_latency;   /**< Current fetch
                                                           *   latency avg */
                        rd_avg_t rkb_avg_produce_latency; /**< Current produce
                                                           *   latency avg */
                } rd_avg_current;

                struct {
                        rd_avg_t rkb_avg_rtt; /**< Rolled over RTT avg */
                        rd_avg_t
                            rkb_avg_throttle; /**< Rolled over throttle avg */
                        rd_avg_t rkb_avg_outbuf_latency; /**< Rolled over outbuf
                                                          *   latency avg */
                        rd_avg_t rkb_avg_fetch_latency;  /**< Rolled over fetch
                                                          *   latency avg */
                        rd_avg_t
                            rkb_avg_produce_latency; /**< Rolled over produce
                                                      *   latency avg */
                } rd_avg_rollover;
        } rkb_telemetry;

        int rkb_req_timeouts; /* Current value */

        thrd_t rkb_thread;

@@ -288,9 +252,6 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
        /** Absolute time of last connection attempt. */
        rd_ts_t rkb_ts_connect;

        /** True if a reauthentication is in progress. */
        rd_bool_t rkb_reauth_in_progress;

        /**< Persistent connection demand is tracked by
         * a counter for each type of demand.
         * The broker thread will maintain a persistent connection

@@ -362,9 +323,6 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
                rd_kafka_resp_err_t err; /**< Last error code */
                int cnt;                 /**< Number of identical errors */
        } rkb_last_err;


        rd_kafka_timer_t rkb_sasl_reauth_tmr;
};

#define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt)

@@ -445,13 +403,6 @@ int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb,
                                             int16_t maxver,
                                             int *featuresp);

int16_t rd_kafka_broker_ApiVersion_supported0(rd_kafka_broker_t *rkb,
                                              int16_t ApiKey,
                                              int16_t minver,
                                              int16_t maxver,
                                              int *featuresp,
                                              rd_bool_t do_lock);

rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func,
                                                      int line,
                                                      rd_kafka_t *rk,

@@ -510,9 +461,7 @@ rd_kafka_broker_t *rd_kafka_broker_controller_async(rd_kafka_t *rk,
                                                    int state,
                                                    rd_kafka_enq_once_t *eonce);

int rd_kafka_brokers_add0(rd_kafka_t *rk,
                          const char *brokerlist,
                          rd_bool_t is_bootstrap_server_list);
int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist);
void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state);

void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,

@@ -558,13 +507,9 @@ void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr);
int rd_kafka_send(rd_kafka_broker_t *rkb);
int rd_kafka_recv(rd_kafka_broker_t *rkb);

#define rd_kafka_dr_msgq(rkt, rkmq, err)                                       \
        rd_kafka_dr_msgq0(rkt, rkmq, err, NULL /*no produce result*/)

void rd_kafka_dr_msgq0(rd_kafka_topic_t *rkt,
                       rd_kafka_msgq_t *rkmq,
                       rd_kafka_resp_err_t err,
                       const rd_kafka_Produce_result_t *presult);
void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt,
                      rd_kafka_msgq_t *rkmq,
                      rd_kafka_resp_err_t err);

void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb,
                              rd_kafka_toppar_t *rktp,

@@ -613,25 +558,6 @@ int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk,
                                             rd_kafka_enq_once_t *eonce);
void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk);

rd_kafka_broker_t *rd_kafka_broker_random0(const char *func,
                                           int line,
                                           rd_kafka_t *rk,
                                           rd_bool_t is_up,
                                           int state,
                                           int *filtered_cnt,
                                           int (*filter)(rd_kafka_broker_t *rk,
                                                         void *opaque),
                                           void *opaque);

#define rd_kafka_broker_random(rk, state, filter, opaque)                      \
        rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_false, state,   \
                                NULL, filter, opaque)

#define rd_kafka_broker_random_up(rk, filter, opaque)                          \
        rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_true,           \
                                RD_KAFKA_BROKER_STATE_UP, NULL, filter,        \
                                opaque)



/**

@@ -676,11 +602,6 @@ void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon,

void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon);

void rd_kafka_broker_start_reauth_timer(rd_kafka_broker_t *rkb,
                                        int64_t connections_max_reauth_ms);

void rd_kafka_broker_start_reauth_cb(rd_kafka_timers_t *rkts, void *rkb);

int unittest_broker(void);

#endif /* _RDKAFKA_BROKER_H_ */

@@ -1,8 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023, Confluent Inc.
 * Copyright (c) 2012-2015, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -38,8 +37,6 @@ void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf) {
        case RD_KAFKAP_Metadata:
                if (rkbuf->rkbuf_u.Metadata.topics)
                        rd_list_destroy(rkbuf->rkbuf_u.Metadata.topics);
                if (rkbuf->rkbuf_u.Metadata.topic_ids)
                        rd_list_destroy(rkbuf->rkbuf_u.Metadata.topic_ids);
                if (rkbuf->rkbuf_u.Metadata.reason)
                        rd_free(rkbuf->rkbuf_u.Metadata.reason);
                if (rkbuf->rkbuf_u.Metadata.rko)

@@ -123,18 +120,6 @@ rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags) {
        return rkbuf;
}

/**
 * @brief Upgrade request header to flexver by writing header tags.
 */
void rd_kafka_buf_upgrade_flexver_request(rd_kafka_buf_t *rkbuf) {
        if (likely(!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))) {
                rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;

                /* Empty request header tags */
                rd_kafka_buf_write_i8(rkbuf, 0);
        }
}


/**
 * @brief Create new request buffer with the request-header written (will

@@ -180,7 +165,12 @@ rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
        rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_client_id);

        if (is_flexver) {
                rd_kafka_buf_upgrade_flexver_request(rkbuf);
                /* Must set flexver after writing the client id since
                 * it is still a standard non-compact string. */
                rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;

                /* Empty request header tags */
                rd_kafka_buf_write_i8(rkbuf, 0);
        }

        return rkbuf;
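
The ordering constraint restored in this hunk comes from KIP-482 flexible versions: most protocol strings become COMPACT_STRING (unsigned varint of length+1 followed by the bytes), but the request header's client id stays a classic INT16-length string, so the FLEXVER flag may only be set after it is written, followed by a single 0x00 byte meaning no tagged header fields. A standalone sketch of the two string encodings (an illustration, not librdkafka code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Classic string: big-endian INT16 length followed by the bytes. */
static size_t write_str(uint8_t *out, const char *s) {
        uint16_t len = (uint16_t)strlen(s);
        out[0] = (uint8_t)(len >> 8);
        out[1] = (uint8_t)(len & 0xff);
        memcpy(out + 2, s, len);
        return 2 + (size_t)len;
}

/* Compact string: unsigned varint of (length + 1) followed by the bytes. */
static size_t write_compact_str(uint8_t *out, const char *s) {
        size_t of = 0, len = strlen(s);
        uint64_t v = (uint64_t)len + 1;
        while (v >= 0x80) {
                out[of++] = (uint8_t)(v & 0x7f) | 0x80;
                v >>= 7;
        }
        out[of++] = (uint8_t)v;
        memcpy(out + of, s, len);
        return of + len;
}

int main(void) {
        uint8_t buf[64];
        printf("classic: %zu bytes, compact: %zu bytes\n",
               write_str(buf, "rdkafka"), write_compact_str(buf, "rdkafka"));
        return 0;
}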
@@ -1,8 +1,7 @@
/*
 * librdkafka - Apache Kafka C library
 *
 * Copyright (c) 2012-2022, Magnus Edenhill
 *               2023 Confluent Inc.
 * Copyright (c) 2012-2015, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -49,36 +48,21 @@ typedef struct rd_tmpabuf_s {
        size_t of;
        char *buf;
        int failed;
        rd_bool_t assert_on_fail;
        int assert_on_fail;
} rd_tmpabuf_t;

/**
 * @brief Initialize new tmpabuf of non-final \p size bytes.
 * @brief Allocate new tmpabuf with \p size bytes pre-allocated.
 */
static RD_UNUSED void
rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, rd_bool_t assert_on_fail) {
        tab->buf = NULL;
        tab->size = RD_ROUNDUP(size, 8);
rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, int assert_on_fail) {
        tab->buf = rd_malloc(size);
        tab->size = size;
        tab->of = 0;
        tab->failed = 0;
        tab->assert_on_fail = assert_on_fail;
}

/**
 * @brief Add a new allocation of \p _size bytes,
 *        rounded up to maximum word size,
 *        for \p _times times.
 */
#define rd_tmpabuf_add_alloc_times(_tab, _size, _times)                        \
        (_tab)->size += RD_ROUNDUP(_size, 8) * _times

#define rd_tmpabuf_add_alloc(_tab, _size)                                      \
        rd_tmpabuf_add_alloc_times(_tab, _size, 1)
/**
 * @brief Finalize tmpabuf pre-allocating tab->size bytes.
 */
#define rd_tmpabuf_finalize(_tab) (_tab)->buf = rd_malloc((_tab)->size)
|
||||
|
||||
/**
|
||||
* @brief Free memory allocated by tmpabuf
|
||||
*/
|
||||
|
|
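This tmpabuf hunk swaps the newer two-pass scheme (tally sizes with rd_tmpabuf_add_alloc(), then one rd_malloc() in rd_tmpabuf_finalize()) back to 2.0.2's single upfront allocation of the caller's size guess. A simplified standalone sketch of the two-pass pattern being removed; the names and the 8-byte rounding follow the diff, everything else is illustrative:

#include <stdlib.h>

#define ROUNDUP8(x) (((x) + 7u) & ~(size_t)7u)

typedef struct tmpabuf_s {
        size_t size, of;
        char *buf;
} tmpabuf_t;

/* Pass 1: tally the (8-byte aligned) sizes of everything that will
 * later be carved out of the buffer. */
static void tmpabuf_add_alloc(tmpabuf_t *tab, size_t size) {
        tab->size += ROUNDUP8(size);
}

/* Single allocation once the total is known. */
static int tmpabuf_finalize(tmpabuf_t *tab) {
        tab->buf = malloc(tab->size);
        tab->of  = 0;
        return tab->buf != NULL;
}

/* Pass 2: hand out aligned slices; returns NULL when the pass-1
 * tally did not cover this request. */
static void *tmpabuf_alloc(tmpabuf_t *tab, size_t size) {
        size_t of = tab->of;
        if (of + ROUNDUP8(size) > tab->size)
                return NULL;
        tab->of = of + ROUNDUP8(size);
        return tab->buf + of;
}

The appeal of the two-pass form is that the exact total is known before the single allocation, so later writes into the buffer cannot run past the end as long as the tally covered every slice.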
@@ -375,18 +359,13 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
 
         union {
                 struct {
-                        rd_list_t *topics; /* Requested topics (char *) */
-                        rd_list_t *
-                            topic_ids; /* Requested topic ids rd_kafka_Uuid_t */
-                        char *reason;      /* Textual reason */
+                        rd_list_t *topics;  /* Requested topics (char *) */
+                        char *reason;       /* Textual reason */
                         rd_kafka_op_t *rko; /* Originating rko with replyq
                                              * (if any) */
                         rd_bool_t all_topics; /**< Full/All topics requested */
                         rd_bool_t cgrp_update; /**< Update cgrp with topic
                                                 * status from response. */
-                        rd_bool_t force_racks; /**< Force the returned metadata
-                                                * to contain partition to
-                                                * rack mapping. */
-
                         int *decr; /* Decrement this integer by one
                                     * when request is complete:
@@ -703,10 +682,6 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
                 size_t _slen;                                                  \
                 char *_dst;                                                    \
                 rd_kafka_buf_read_str(rkbuf, &_kstr);                          \
-                if (RD_KAFKAP_STR_IS_NULL(&_kstr)) {                           \
-                        dst = NULL;                                            \
-                        break;                                                 \
-                }                                                              \
                 _slen = RD_KAFKAP_STR_LEN(&_kstr);                             \
                 if (!(_dst = rd_tmpabuf_write(tmpabuf, _kstr.str, _slen + 1))) \
                         rd_kafka_buf_parse_fail(                               \
@@ -719,44 +694,21 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
         } while (0)
 
 /**
- * Skip a string without flexver.
+ * Skip a string.
  */
-#define rd_kafka_buf_skip_str_no_flexver(rkbuf)                                \
+#define rd_kafka_buf_skip_str(rkbuf)                                           \
         do {                                                                   \
                 int16_t _slen;                                                 \
                 rd_kafka_buf_read_i16(rkbuf, &_slen);                          \
                 rd_kafka_buf_skip(rkbuf, RD_KAFKAP_STR_LEN0(_slen));           \
         } while (0)
-
-/**
- * Skip a string (generic).
- */
-#define rd_kafka_buf_skip_str(rkbuf)                                           \
-        do {                                                                   \
-                if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) {            \
-                        uint64_t _uva;                                         \
-                        rd_kafka_buf_read_uvarint(rkbuf, &_uva);               \
-                        rd_kafka_buf_skip(                                     \
-                            rkbuf, RD_KAFKAP_STR_LEN0(((int64_t)_uva) - 1));   \
-                } else {                                                       \
-                        rd_kafka_buf_skip_str_no_flexver(rkbuf);               \
-                }                                                              \
-        } while (0)
-
-/**
- * Read Kafka COMPACT_BYTES representation (VARINT+N) or
- * standard BYTES representation(4+N).
- * The 'kbytes' will be updated to point to rkbuf data.
- */
-#define rd_kafka_buf_read_kbytes(rkbuf, kbytes)                                \
+/* Read Kafka Bytes representation (4+N).
+ *  The 'kbytes' will be updated to point to rkbuf data */
+#define rd_kafka_buf_read_bytes(rkbuf, kbytes)                                 \
         do {                                                                   \
-                int32_t _klen;                                                 \
-                if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {           \
-                        rd_kafka_buf_read_i32a(rkbuf, _klen);                  \
-                } else {                                                       \
-                        uint64_t _uva;                                         \
-                        rd_kafka_buf_read_uvarint(rkbuf, &_uva);               \
-                        _klen = ((int32_t)_uva) - 1;                           \
-                }                                                              \
+                int _klen;                                                     \
+                rd_kafka_buf_read_i32a(rkbuf, _klen);                          \
                 (kbytes)->len = _klen;                                         \
                 if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) {                         \
                         (kbytes)->data = NULL;                                 \
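The removed rd_kafka_buf_read_kbytes() handled both wire encodings that the reinstated rd_kafka_buf_read_bytes() no longer needs to: classic BYTES is a 4-byte big-endian length (-1 meaning NULL) followed by the payload, while flexible-version COMPACT_BYTES is an unsigned varint holding length+1, so 0 means NULL. A standalone sketch of the two decoders over a flat byte buffer (function names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Decode an unsigned LEB128-style varint; returns bytes consumed,
 * or 0 on truncated/over-long input. */
static size_t uvarint_dec(const uint8_t *p, size_t len, uint64_t *vp) {
        uint64_t v = 0;
        size_t i;
        for (i = 0; i < len && i < 10; i++) {
                v |= (uint64_t)(p[i] & 0x7f) << (7 * i);
                if (!(p[i] & 0x80)) {
                        *vp = v;
                        return i + 1;
                }
        }
        return 0;
}

/* Returns the payload length, -1 for NULL, or -2 on parse error.
 * *payload is left pointing at the first payload byte. */
static int32_t read_bytes_len(const uint8_t *p, size_t len, int flexver,
                              const uint8_t **payload) {
        if (!flexver) {
                int32_t klen;
                if (len < 4)
                        return -2;
                klen = (int32_t)((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
                                 (uint32_t)p[2] << 8 | (uint32_t)p[3]);
                *payload = p + 4;
                return klen; /* -1 means NULL */
        } else {
                uint64_t uva;
                size_t r = uvarint_dec(p, len, &uva);
                if (!r)
                        return -2;
                *payload = p + r;
                return (int32_t)uva - 1; /* stored as length+1; 0 -> NULL */
        }
}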
@@ -768,6 +720,7 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
                 rd_kafka_buf_check_len(rkbuf, _klen);                          \
         } while (0)
 
+
 /**
  * @brief Read \p size bytes from buffer, setting \p *ptr to the start
  *        of the memory region.
@@ -784,7 +737,7 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
 /**
  * @brief Read varint-lengted Kafka Bytes representation
  */
-#define rd_kafka_buf_read_kbytes_varint(rkbuf, kbytes)                         \
+#define rd_kafka_buf_read_bytes_varint(rkbuf, kbytes)                          \
         do {                                                                   \
                 int64_t _len2;                                                 \
                 size_t _r =                                                    \
@@ -831,62 +784,18 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
                         uint64_t _tagtype, _taglen;                            \
                         rd_kafka_buf_read_uvarint(rkbuf, &_tagtype);           \
                         rd_kafka_buf_read_uvarint(rkbuf, &_taglen);            \
-                        if (_taglen > 0)                                       \
-                                rd_kafka_buf_skip(rkbuf, (size_t)(_taglen));   \
+                        if (_taglen > 1)                                       \
+                                rd_kafka_buf_skip(rkbuf,                       \
+                                                  (size_t)(_taglen - 1));      \
                 }                                                              \
         } while (0)
 
-/**
- * @brief Read KIP-482 Tags at current position in the buffer using
- *        the `read_tag` function receiving the `opaque' pointer.
- */
-#define rd_kafka_buf_read_tags(rkbuf, read_tag, ...)                           \
-        do {                                                                   \
-                uint64_t _tagcnt;                                              \
-                if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))           \
-                        break;                                                 \
-                rd_kafka_buf_read_uvarint(rkbuf, &_tagcnt);                    \
-                while (_tagcnt-- > 0) {                                        \
-                        uint64_t _tagtype, _taglen;                            \
-                        rd_kafka_buf_read_uvarint(rkbuf, &_tagtype);           \
-                        rd_kafka_buf_read_uvarint(rkbuf, &_taglen);            \
-                        int _read_tag_resp =                                   \
-                            read_tag(rkbuf, _tagtype, _taglen, __VA_ARGS__);   \
-                        if (_read_tag_resp == -1)                              \
-                                goto err_parse;                                \
-                        if (!_read_tag_resp && _taglen > 0)                    \
-                                rd_kafka_buf_skip(rkbuf, (size_t)(_taglen));   \
-                }                                                              \
-        } while (0)
-
-/**
- * @brief Write \p tagcnt tags at the current position in the buffer.
- *        Calling \p write_tag to write each one with \p rkbuf , tagtype
- *        argument and the remaining arguments.
- */
-#define rd_kafka_buf_write_tags(rkbuf, write_tag, tags, tagcnt, ...)           \
-        do {                                                                   \
-                uint64_t i;                                                    \
-                if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))           \
-                        break;                                                 \
-                rd_kafka_buf_write_uvarint(rkbuf, tagcnt);                     \
-                for (i = 0; i < tagcnt; i++) {                                 \
-                        size_t of_taglen, prev_buf_len;                        \
-                        rd_kafka_buf_write_uvarint(rkbuf, tags[i]);            \
-                        of_taglen = rd_kafka_buf_write_arraycnt_pos(rkbuf);    \
-                        prev_buf_len = (rkbuf)->rkbuf_buf.rbuf_len;            \
-                        write_tag(rkbuf, tags[i], __VA_ARGS__);                \
-                        rd_kafka_buf_finalize_arraycnt(                        \
-                            rkbuf, of_taglen,                                  \
-                            (rkbuf)->rkbuf_buf.rbuf_len - prev_buf_len - 1);   \
-                }                                                              \
-        } while (0)
-
-
 /**
- * @brief Write empty tags at the current position in the buffer.
+ * @brief Write tags at the current position in the buffer.
+ * @remark Currently always writes empty tags.
+ * @remark Change to ..write_uvarint() when actual tags are supported.
  */
-#define rd_kafka_buf_write_tags_empty(rkbuf)                                   \
+#define rd_kafka_buf_write_tags(rkbuf)                                         \
         do {                                                                   \
                 if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))           \
                         break;                                                 \
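Both variants of the skip-tags macro above walk a KIP-482 tagged-fields section: an unsigned varint tag count, then per tag a varint type, a varint length and that many payload bytes. The revert reinstates the 2.0.2 variant that skips _taglen - 1 bytes, i.e. treats the length like a compact length+1 value. A standalone sketch of the straight walk, with the varint decoder repeated from the earlier sketch:

#include <stddef.h>
#include <stdint.h>

static size_t uvarint_dec(const uint8_t *p, size_t len, uint64_t *vp) {
        uint64_t v = 0;
        size_t i;
        for (i = 0; i < len && i < 10; i++) {
                v |= (uint64_t)(p[i] & 0x7f) << (7 * i);
                if (!(p[i] & 0x80)) {
                        *vp = v;
                        return i + 1;
                }
        }
        return 0;
}

/* Skip a KIP-482 tagged-fields section starting at p; returns the
 * number of bytes consumed, or 0 on parse error. */
static size_t skip_tags(const uint8_t *p, size_t len) {
        uint64_t tagcnt, tagtype, taglen;
        size_t of = 0, r;

        if (!(r = uvarint_dec(p + of, len - of, &tagcnt)))
                return 0;
        of += r;

        while (tagcnt-- > 0) {
                if (!(r = uvarint_dec(p + of, len - of, &tagtype)))
                        return 0;
                of += r;
                if (!(r = uvarint_dec(p + of, len - of, &taglen)))
                        return 0;
                of += r;
                if (taglen > len - of)
                        return 0;
                of += taglen; /* the tag payload is exactly taglen bytes */
        }
        return of;
}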
@@ -906,8 +815,7 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
                 } else {                                                       \
                         rd_kafka_buf_read_i32(rkbuf, arrcnt);                  \
                 }                                                              \
-                if (*(arrcnt) < -1 ||                                          \
-                    ((maxval) != -1 && *(arrcnt) > (maxval)))                  \
+                if (*(arrcnt) < 0 || ((maxval) != -1 && *(arrcnt) > (maxval))) \
                         rd_kafka_buf_parse_fail(                               \
                             rkbuf, "ApiArrayCnt %" PRId32 " out of range",     \
                             *(arrcnt));                                       \
@@ -1009,7 +917,6 @@ rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
 #define rd_kafka_buf_new_flexver_request(rkb, ApiKey, segcnt, size,            \
                                          is_flexver)                           \
         rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, is_flexver)
-void rd_kafka_buf_upgrade_flexver_request(rd_kafka_buf_t *rkbuf);
 
 rd_kafka_buf_t *
 rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *));
@@ -1165,57 +1072,9 @@ rd_kafka_buf_update_u32(rd_kafka_buf_t *rkbuf, size_t of, uint32_t v) {
 }
 
 
-/**
- * @brief Write varint-encoded signed value to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_varint(rd_kafka_buf_t *rkbuf,
-                                                  int64_t v) {
-        char varint[RD_UVARINT_ENC_SIZEOF(v)];
-        size_t sz;
-
-        sz = rd_uvarint_enc_i64(varint, sizeof(varint), v);
-
-        return rd_kafka_buf_write(rkbuf, varint, sz);
-}
-
-/**
- * @brief Write varint-encoded unsigned value to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_uvarint(rd_kafka_buf_t *rkbuf,
-                                                   uint64_t v) {
-        char varint[RD_UVARINT_ENC_SIZEOF(v)];
-        size_t sz;
-
-        sz = rd_uvarint_enc_u64(varint, sizeof(varint), v);
-
-        return rd_kafka_buf_write(rkbuf, varint, sz);
-}
-
-
-
-/**
- * @brief Write standard or flexver arround count field to buffer.
- *        Use this when the array count is known beforehand, else use
- *        rd_kafka_buf_write_arraycnt_pos().
- */
-static RD_INLINE RD_UNUSED size_t
-rd_kafka_buf_write_arraycnt(rd_kafka_buf_t *rkbuf, size_t cnt) {
-
-        /* Count must fit in 31-bits minus the per-byte carry-bit */
-        rd_assert(cnt + 1 < (size_t)(INT_MAX >> 4));
-
-        if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))
-                return rd_kafka_buf_write_i32(rkbuf, (int32_t)cnt);
-
-        /* CompactArray has a base of 1, 0 is for Null arrays */
-        cnt += 1;
-        return rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)cnt);
-}
-
-
 /**
  * @brief Write array count field to buffer (i32) for later update with
- *        rd_kafka_buf_finalize_arraycnt().
+ *        rd_kafka_buf_update_arraycnt().
  */
 #define rd_kafka_buf_write_arraycnt_pos(rkbuf) rd_kafka_buf_write_i32(rkbuf, 0)
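The removed rd_kafka_buf_write_arraycnt() captures the CompactArray rule its comment states: classic arrays carry a plain big-endian i32 count, while flexible versions encode count+1 as an unsigned varint because 0 is reserved for NULL arrays. A standalone sketch with illustrative names:

#include <stddef.h>
#include <stdint.h>

/* Encode v as an unsigned LEB128-style varint; returns bytes written
 * (at most 10 for a 64-bit value). */
static size_t uvarint_enc(uint8_t *dst, uint64_t v) {
        size_t of = 0;
        do {
                dst[of++] = (uint8_t)((v & 0x7f) | (v > 0x7f ? 0x80 : 0));
                v >>= 7;
        } while (v);
        return of;
}

/* Write an array count: classic i32, or CompactArray varint of
 * count+1 (0 is reserved for NULL arrays). */
static size_t write_arraycnt(uint8_t *dst, size_t cnt, int flexver) {
        if (!flexver) {
                dst[0] = (uint8_t)(cnt >> 24);
                dst[1] = (uint8_t)(cnt >> 16);
                dst[2] = (uint8_t)(cnt >> 8);
                dst[3] = (uint8_t)cnt;
                return 4;
        }
        return uvarint_enc(dst, (uint64_t)cnt + 1);
}

So an empty CompactArray is the single byte 0x01 and a NULL one is 0x00, and small counts still fit in one byte after the +1.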
@@ -1233,11 +1092,11 @@ rd_kafka_buf_write_arraycnt(rd_kafka_buf_t *rkbuf, size_t cnt) {
  *        and may thus be costly.
  */
 static RD_INLINE void
-rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, size_t cnt) {
+rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, int cnt) {
         char buf[sizeof(int32_t)];
         size_t sz, r;
 
-        rd_assert(cnt < (size_t)INT_MAX);
+        rd_assert(cnt >= 0);
 
         if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
                 rd_kafka_buf_update_i32(rkbuf, of, (int32_t)cnt);
@@ -1249,8 +1108,7 @@ rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, size_t cnt) {
 
         sz = rd_uvarint_enc_u64(buf, sizeof(buf), (uint64_t)cnt);
         rd_assert(!RD_UVARINT_OVERFLOW(sz));
-        if (cnt < 127)
-                rd_assert(sz == 1);
+
         rd_buf_write_update(&rkbuf->rkbuf_buf, of, buf, sz);
 
         if (sz < sizeof(int32_t)) {
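rd_kafka_buf_write_arraycnt_pos() and rd_kafka_buf_finalize_arraycnt() (both kept by the revert, the latter just narrowed to an int count) form a reserve-then-patch pair: a 4-byte i32 placeholder is written before the element count is known and patched afterwards; in the flexver case the varint re-encoding is usually shorter than 4 bytes, so the tail of the buffer must be shifted down, which the surrounding comment warns may be costly. A flat-buffer sketch of that pattern; memmove stands in for the library's rd_buf machinery, and it assumes cnt+1 fits in 4 varint bytes (cnt below 2^28 - 1):

#include <stdint.h>
#include <string.h>

size_t uvarint_enc(uint8_t *dst, uint64_t v); /* as in the CompactArray sketch */

/* Reserve 4 bytes for a yet-unknown array count; returns its offset. */
static size_t write_arraycnt_pos(uint8_t *buf, size_t *lenp) {
        size_t of = *lenp;
        memset(buf + of, 0, 4);
        *lenp += 4;
        return of;
}

/* Patch the reserved slot once cnt is known.  For flexver the varint
 * of cnt+1 is at most 4 bytes (assumed), so the tail is shifted down
 * and the buffer shrinks accordingly. */
static void finalize_arraycnt(uint8_t *buf, size_t *lenp, size_t of,
                              uint32_t cnt, int flexver) {
        uint8_t tmp[5];
        size_t sz;

        if (!flexver) {
                buf[of]     = (uint8_t)(cnt >> 24);
                buf[of + 1] = (uint8_t)(cnt >> 16);
                buf[of + 2] = (uint8_t)(cnt >> 8);
                buf[of + 3] = (uint8_t)cnt;
                return;
        }

        sz = uvarint_enc(tmp, (uint64_t)cnt + 1);
        memcpy(buf + of, tmp, sz);
        if (sz < 4) {
                memmove(buf + of + sz, buf + of + 4, *lenp - (of + 4));
                *lenp -= 4 - sz;
        }
}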
@@ -1283,6 +1141,34 @@ rd_kafka_buf_update_i64(rd_kafka_buf_t *rkbuf, size_t of, int64_t v) {
         rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
 }
 
+
+/**
+ * @brief Write varint-encoded signed value to buffer.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_varint(rd_kafka_buf_t *rkbuf,
+                                                  int64_t v) {
+        char varint[RD_UVARINT_ENC_SIZEOF(v)];
+        size_t sz;
+
+        sz = rd_uvarint_enc_i64(varint, sizeof(varint), v);
+
+        return rd_kafka_buf_write(rkbuf, varint, sz);
+}
+
+/**
+ * @brief Write varint-encoded unsigned value to buffer.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_uvarint(rd_kafka_buf_t *rkbuf,
+                                                   uint64_t v) {
+        char varint[RD_UVARINT_ENC_SIZEOF(v)];
+        size_t sz;
+
+        sz = rd_uvarint_enc_u64(varint, sizeof(varint), v);
+
+        return rd_kafka_buf_write(rkbuf, varint, sz);
+}
+
+
 /**
  * @brief Write standard (2-byte header) or KIP-482 COMPACT_STRING to buffer.
 *
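The re-added rd_kafka_buf_write_varint() defers to rd_uvarint_enc_i64(), i.e. the signed value is zigzag-mapped onto an unsigned one before the base-128 varint encoding so that small negative numbers stay short. A sketch of the standard mapping (not the library's exact code):

#include <stdint.h>

/* Zigzag: 0, -1, 1, -2, 2, ...  ->  0, 1, 2, 3, 4, ... */
static uint64_t zigzag64(int64_t v) {
        return ((uint64_t)v << 1) ^ (uint64_t)(v >> 63);
}

static int64_t unzigzag64(uint64_t u) {
        return (int64_t)(u >> 1) ^ -(int64_t)(u & 1);
}

zigzag64(-1) is 1 and zigzag64(1) is 2, so both encode as a single varint byte, whereas a plain two's-complement -1 would need the full ten.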
@@ -1388,40 +1274,30 @@ static RD_INLINE void rd_kafka_buf_push_kstr(rd_kafka_buf_t *rkbuf,
 static RD_INLINE size_t
 rd_kafka_buf_write_kbytes(rd_kafka_buf_t *rkbuf,
                           const rd_kafkap_bytes_t *kbytes) {
-        size_t len, r;
+        size_t len;
 
-        if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
-                if (!kbytes || RD_KAFKAP_BYTES_IS_NULL(kbytes))
-                        return rd_kafka_buf_write_i32(rkbuf, -1);
+        if (!kbytes || RD_KAFKAP_BYTES_IS_NULL(kbytes))
+                return rd_kafka_buf_write_i32(rkbuf, -1);
 
-                if (RD_KAFKAP_BYTES_IS_SERIALIZED(kbytes))
-                        return rd_kafka_buf_write(rkbuf,
-                                                  RD_KAFKAP_BYTES_SER(kbytes),
-                                                  RD_KAFKAP_BYTES_SIZE(kbytes));
+        if (RD_KAFKAP_BYTES_IS_SERIALIZED(kbytes))
+                return rd_kafka_buf_write(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
+                                          RD_KAFKAP_BYTES_SIZE(kbytes));
 
-                len = RD_KAFKAP_BYTES_LEN(kbytes);
-                rd_kafka_buf_write_i32(rkbuf, (int32_t)len);
-                rd_kafka_buf_write(rkbuf, kbytes->data, len);
+        len = RD_KAFKAP_BYTES_LEN(kbytes);
+        rd_kafka_buf_write_i32(rkbuf, (int32_t)len);
+        rd_kafka_buf_write(rkbuf, kbytes->data, len);
 
-                return 4 + len;
-        }
-
-        /* COMPACT_BYTES lengths are:
-         *  0   = NULL,
-         *  1   = empty
-         *  N.. = length + 1
-         */
-        if (!kbytes)
-                len = 0;
-        else
-                len = kbytes->len + 1;
-
-        r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len);
-        if (len > 1) {
-                rd_kafka_buf_write(rkbuf, kbytes->data, len - 1);
-                r += len - 1;
-        }
-        return r;
+        return 4 + len;
 }
 
 /**
  * Push (i.e., no copy) Kafka bytes to buffer iovec
  */
 static RD_INLINE void
 rd_kafka_buf_push_kbytes(rd_kafka_buf_t *rkbuf,
                          const rd_kafkap_bytes_t *kbytes) {
         rd_kafka_buf_push(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
                           RD_KAFKAP_BYTES_SIZE(kbytes), NULL);
 }
 
 /**
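The flexver branch deleted from rd_kafka_buf_write_kbytes() is the write side of the COMPACT_BYTES scheme its comment spells out: the varint stores 0 for NULL, 1 for empty and length+1 otherwise, with the payload following. A standalone sketch, reusing uvarint_enc() from the CompactArray sketch above:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

size_t uvarint_enc(uint8_t *dst, uint64_t v); /* as in the CompactArray sketch */

/* Write COMPACT_BYTES: varint(0) for NULL, varint(len+1) followed by
 * the payload otherwise; returns total bytes written. */
static size_t write_compact_bytes(uint8_t *dst, const void *data,
                                  size_t len) {
        size_t r;
        if (!data)
                return uvarint_enc(dst, 0);
        r = uvarint_enc(dst, (uint64_t)len + 1);
        memcpy(dst + r, data, len);
        return r + len;
}

write_compact_bytes(dst, "", 0) emits the single byte 0x01 (empty), while a NULL pointer emits 0x00.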
@@ -1505,20 +1381,4 @@ void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf,
                             void *make_opaque,
                             void (*free_make_opaque_cb)(void *make_opaque));
 
-
-#define rd_kafka_buf_read_uuid(rkbuf, uuid)                                    \
-        do {                                                                   \
-                rd_kafka_buf_read_i64(rkbuf,                                   \
-                                      &((uuid)->most_significant_bits));       \
-                rd_kafka_buf_read_i64(rkbuf,                                   \
-                                      &((uuid)->least_significant_bits));      \
-                (uuid)->base64str[0] = '\0';                                   \
-        } while (0)
-
-static RD_UNUSED void rd_kafka_buf_write_uuid(rd_kafka_buf_t *rkbuf,
-                                              rd_kafka_Uuid_t *uuid) {
-        rd_kafka_buf_write_i64(rkbuf, uuid->most_significant_bits);
-        rd_kafka_buf_write_i64(rkbuf, uuid->least_significant_bits);
-}
-
 #endif /* _RDKAFKA_BUF_H_ */
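The removed UUID helpers show the wire format Kafka uses for topic ids: two big-endian int64s, most-significant half first, 16 bytes in all; the read macro merely invalidates the cached base64 string. A standalone sketch of the write side (struct and helper names are illustrative):

#include <stdint.h>

typedef struct kafka_uuid_s {
        int64_t most_significant_bits;
        int64_t least_significant_bits;
} kafka_uuid_t;

static void write_be64(uint8_t *p, uint64_t v) {
        int i;
        for (i = 0; i < 8; i++)
                p[i] = (uint8_t)(v >> (56 - 8 * i));
}

/* Serialize a UUID as two big-endian i64s (16 bytes). */
static void uuid_write(uint8_t dst[16], const kafka_uuid_t *u) {
        write_be64(dst, (uint64_t)u->most_significant_bits);
        write_be64(dst + 8, (uint64_t)u->least_significant_bits);
}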
@@ -1,7 +1,7 @@
 /*
  * librdkafka - The Apache Kafka C/C++ library
  *
- * Copyright (c) 2019-2022, Magnus Edenhill
+ * Copyright (c) 2019 Magnus Edenhill
  * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -1,7 +1,7 @@
 /*
  * librdkafka - The Apache Kafka C/C++ library
  *
- * Copyright (c) 2019-2022, Magnus Edenhill
+ * Copyright (c) 2019 Magnus Edenhill
  * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

(File diff suppressed because it is too large.)
@@ -1,8 +1,7 @@
 /*
  * librdkafka - Apache Kafka C library
  *
- * Copyright (c) 2012-2022, Magnus Edenhill
- *               2023, Confluent Inc.
+ * Copyright (c) 2012-2015, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -57,7 +56,6 @@ typedef struct rd_kafka_cgrp_s {
         rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */
         rd_kafkap_str_t *rkcg_group_instance_id;
         const rd_kafkap_str_t *rkcg_client_id;
-        rd_kafkap_str_t *rkcg_client_rack;
 
         enum {
                 /* Init state */
@@ -165,10 +163,6 @@ typedef struct rd_kafka_cgrp_s {
 
         rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl*/
         rd_interval_t rkcg_heartbeat_intvl;   /* Heartbeat intvl */
-        rd_kafka_timer_t rkcg_serve_timer;    /* Timer for next serve. */
-        int rkcg_heartbeat_intvl_ms;          /* KIP 848: received
-                                               * heartbeat interval in
-                                               * milliseconds */
         rd_interval_t rkcg_join_intvl;          /* JoinGroup interval */
         rd_interval_t rkcg_timeout_scan_intvl;  /* Timeout scanner */
 
@@ -185,8 +179,7 @@ typedef struct rd_kafka_cgrp_s {
 
         rd_list_t rkcg_toppars; /* Toppars subscribed to*/
 
-        int32_t rkcg_generation_id; /* Current generation id (classic)
-                                     * or member epoch (consumer). */
+        int32_t rkcg_generation_id; /* Current generation id */
 
         rd_kafka_assignor_t *rkcg_assignor; /**< The current partition
                                              *   assignor. used by both
@@ -197,12 +190,6 @@ typedef struct rd_kafka_cgrp_s {
         int32_t rkcg_coord_id; /**< Current coordinator id,
                                 *   or -1 if not known. */
 
-        rd_kafka_group_protocol_t
-            rkcg_group_protocol; /**< Group protocol to use */
-
-        rd_kafkap_str_t *rkcg_group_remote_assignor; /**< Group remote
-                                                      *   assignor to use */
-
         rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator
                                              *   broker handle, or NULL.
                                              *   rkcg_coord's nodename is
@@ -268,46 +255,6 @@ typedef struct rd_kafka_cgrp_s {
          *   currently in-progress incremental unassign. */
         rd_kafka_topic_partition_list_t *rkcg_rebalance_incr_assignment;
 
-        /** Current acked assignment, start with an empty list. */
-        rd_kafka_topic_partition_list_t *rkcg_current_assignment;
-
-        /** Assignment the is currently reconciling.
-         *  Can be NULL in case there's no reconciliation ongoing. */
-        rd_kafka_topic_partition_list_t *rkcg_target_assignment;
-
-        /** Next assignment that will be reconciled once current
-         *  reconciliation finishes. Can be NULL. */
-        rd_kafka_topic_partition_list_t *rkcg_next_target_assignment;
-
-        /** Number of backoff retries when expediting next heartbeat. */
-        int rkcg_expedite_heartbeat_retries;
-
-        /** Flags for KIP-848 state machine. */
-        int rkcg_consumer_flags;
-/** Coordinator is waiting for an acknowledgement of currently reconciled
- *  target assignment. Cleared when an HB succeeds
- *  after reconciliation finishes. */
-#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_ACK 0x1
-/** Member is sending an acknowledgement for a reconciled assignment */
-#define RD_KAFKA_CGRP_CONSUMER_F_SENDING_ACK 0x2
-/** A new subscription needs to be sent to the Coordinator. */
-#define RD_KAFKA_CGRP_CONSUMER_F_SEND_NEW_SUBSCRIPTION 0x4
-/** A new subscription is being sent to the Coordinator. */
-#define RD_KAFKA_CGRP_CONSUMER_F_SENDING_NEW_SUBSCRIPTION 0x8
-/** Consumer has subscribed at least once,
- *  if it didn't happen rebalance protocol is still
- *  considered NONE, otherwise it depends on the
- *  configured partition assignors. */
-#define RD_KAFKA_CGRP_CONSUMER_F_SUBSCRIBED_ONCE 0x10
-/** Send a complete request in next heartbeat */
-#define RD_KAFKA_CGRP_CONSUMER_F_SEND_FULL_REQUEST 0x20
-/** Member is fenced, need to rejoin */
-#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN 0x40
-/** Member is fenced, rejoining */
-#define RD_KAFKA_CGRP_CONSUMER_F_WAIT_REJOIN_TO_COMPLETE 0x80
-/** Serve pending assignments after heartbeat */
-#define RD_KAFKA_CGRP_CONSUMER_F_SERVE_PENDING 0x100
-
         /** Rejoin the group following a currently in-progress
          *  incremental unassign. */
         rd_bool_t rkcg_rebalance_rejoin;
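The rkcg_consumer_flags field deleted above packs the KIP-848 consumer state machine into a single bitmask, one #define per bit from 0x1 up to 0x100. A trivial sketch of the usual set/test/clear idiom on such a field (flag names shortened for illustration):

#include <stdio.h>

#define CONSUMER_F_WAIT_ACK    0x1
#define CONSUMER_F_SENDING_ACK 0x2
#define CONSUMER_F_WAIT_REJOIN 0x40

int main(void) {
        int flags = 0;

        flags |= CONSUMER_F_WAIT_ACK;          /* set */
        if (flags & CONSUMER_F_WAIT_ACK)       /* test */
                flags &= ~CONSUMER_F_WAIT_ACK; /* clear */

        /* several states can be pending at once */
        flags |= CONSUMER_F_SENDING_ACK | CONSUMER_F_WAIT_REJOIN;
        printf("flags=0x%x\n", flags);
        return 0;
}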
@@ -346,9 +293,6 @@
                                     *   assignment */
         } rkcg_c;
 
-        /* Timestamp of last rebalance start */
-        rd_ts_t rkcg_ts_rebalance_start;
-
 } rd_kafka_cgrp_t;
 
 
@@ -369,7 +313,6 @@ extern const char *rd_kafka_cgrp_join_state_names[];
 
 void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg);
 rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk,
-                                   rd_kafka_group_protocol_t group_protocol,
                                    const rd_kafkap_str_t *group_id,
                                    const rd_kafkap_str_t *client_id);
 void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg);
@@ -437,7 +380,4 @@ rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) {
         }
 }
 
-void rd_kafka_cgrp_consumer_expedite_next_heartbeat(rd_kafka_cgrp_t *rkcg,
-                                                    const char *reason);
-
 #endif /* _RDKAFKA_CGRP_H_ */
(Some files were not shown because too many files have changed in this diff.)