From 1f68cce865b5820fbb5d5cc2f004e7a29ba6814c Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Sat, 12 Jul 2025 17:01:52 +0700 Subject: [PATCH 01/21] feat: script to run codegen --- .cargo/config.toml | 2 + .gitignore | 2 +- Cargo.toml | 5 +- openapi.yml | 2541 ----------- typesense-go-unwrapped-api-spec.yaml | 3963 +++++++++++++++++ typesense/Cargo.toml | 6 +- typesense/src/client/collections.rs | 26 + typesense/src/client/documents.rs | 0 typesense/src/client/mod.rs | 144 + typesense/src/lib.rs | 1 + typesense_codegen/.openapi-generator-ignore | 2 + typesense_codegen/.openapi-generator/FILES | 94 +- typesense_codegen/.openapi-generator/VERSION | 2 +- typesense_codegen/README.md | 79 +- typesense_codegen/docs/AnalyticsApi.md | 79 +- ...ter.md => AnalyticsEventCreateResponse.md} | 4 +- ...meter.md => AnalyticsEventCreateSchema.md} | 9 +- .../docs/AnalyticsRuleDeleteResponse.md | 11 + .../docs/AnalyticsRuleParameters.md | 7 +- .../AnalyticsRuleParametersDestination.md | 3 +- .../docs/AnalyticsRuleParametersSource.md | 3 +- ...nalyticsRuleParametersSourceEventsInner.md | 13 + typesense_codegen/docs/AnalyticsRuleSchema.md | 4 +- .../docs/AnalyticsRuleUpsertSchema.md | 12 + .../docs/AnalyticsRulesRetrieveSchema.md | 2 +- typesense_codegen/docs/ApiKey.md | 4 +- .../docs/ApiKeyDeleteResponse.md | 11 + typesense_codegen/docs/ApiKeySchema.md | 4 +- typesense_codegen/docs/ApiKeysResponse.md | 2 +- typesense_codegen/docs/ApiStatsResponse.md | 23 + typesense_codegen/docs/CollectionAlias.md | 2 +- .../docs/CollectionAliasesResponse.md | 2 +- typesense_codegen/docs/CollectionResponse.md | 9 +- typesense_codegen/docs/CollectionSchema.md | 7 +- .../docs/CollectionUpdateSchema.md | 2 +- typesense_codegen/docs/CollectionsApi.md | 36 +- .../docs/ConversationModelCreateSchema.md | 19 + .../docs/ConversationModelSchema.md | 19 + .../docs/ConversationModelUpdateSchema.md | 19 + typesense_codegen/docs/ConversationsApi.md | 161 + .../docs/{PromoteApi.md => CurationApi.md} | 20 +- typesense_codegen/docs/DebugApi.md | 4 +- ...mentsDeleteDocumentsParametersParameter.md | 12 - .../docs/DeleteStopwordsSet200Response.md | 11 + typesense_codegen/docs/DirtyValues.md | 15 + .../docs/DocumentIndexParameters.md | 11 + typesense_codegen/docs/DocumentsApi.md | 322 +- typesense_codegen/docs/DropTokensMode.md | 14 + ...mentsExportDocumentsParametersParameter.md | 13 - typesense_codegen/docs/FacetCounts.md | 4 +- .../docs/FacetCountsCountsInner.md | 1 + typesense_codegen/docs/FacetCountsStats.md | 2 +- typesense_codegen/docs/Field.md | 22 +- typesense_codegen/docs/FieldEmbed.md | 2 +- .../docs/FieldEmbedModelConfig.md | 8 +- typesense_codegen/docs/HealthApi.md | 4 +- typesense_codegen/docs/IndexAction.md | 15 + typesense_codegen/docs/KeysApi.md | 16 +- .../ListStemmingDictionaries200Response.md | 11 + .../docs/MultiSearchCollectionParameters.md | 90 +- .../docs/MultiSearchParameters.md | 86 +- typesense_codegen/docs/MultiSearchResult.md | 3 +- .../docs/MultiSearchResultItem.md | 23 + .../docs/MultiSearchSearchesParameter.md | 3 +- typesense_codegen/docs/OperationsApi.md | 92 +- typesense_codegen/docs/OverrideApi.md | 4 +- typesense_codegen/docs/PresetDeleteSchema.md | 11 + typesense_codegen/docs/PresetSchema.md | 12 + typesense_codegen/docs/PresetUpsertSchema.md | 11 + .../docs/PresetUpsertSchemaValue.md | 12 + typesense_codegen/docs/PresetsApi.md | 130 + .../docs/PresetsRetrieveSchema.md | 11 + typesense_codegen/docs/SchemaChangeStatus.md | 13 + typesense_codegen/docs/ScopedKeyParameters.md | 2 +- 
typesense_codegen/docs/SearchGroupedHit.md | 2 +- typesense_codegen/docs/SearchHighlight.md | 4 +- typesense_codegen/docs/SearchOverride.md | 13 +- .../docs/SearchOverrideDeleteResponse.md | 11 + typesense_codegen/docs/SearchOverrideRule.md | 6 +- .../docs/SearchOverrideSchema.md | 13 +- .../docs/SearchOverridesResponse.md | 2 +- typesense_codegen/docs/SearchParameters.md | 94 +- typesense_codegen/docs/SearchResult.md | 14 +- .../docs/SearchResultConversation.md | 14 + typesense_codegen/docs/SearchResultHit.md | 9 +- .../docs/SearchResultHitTextMatchInfo.md | 17 + .../docs/SearchResultRequestParams.md | 3 +- .../SearchResultRequestParamsVoiceQuery.md | 11 + typesense_codegen/docs/SearchSynonym.md | 2 + .../docs/SearchSynonymDeleteResponse.md | 11 + typesense_codegen/docs/SearchSynonymSchema.md | 2 + .../docs/SearchSynonymsResponse.md | 2 +- typesense_codegen/docs/StemmingApi.md | 99 + typesense_codegen/docs/StemmingDictionary.md | 12 + .../docs/StemmingDictionaryWordsInner.md | 12 + typesense_codegen/docs/StopwordsApi.md | 130 + .../docs/StopwordsSetRetrieveSchema.md | 11 + typesense_codegen/docs/StopwordsSetSchema.md | 13 + .../docs/StopwordsSetUpsertSchema.md | 12 + .../docs/StopwordsSetsRetrieveAllSchema.md | 11 + typesense_codegen/docs/SynonymsApi.md | 132 + .../docs/VoiceQueryModelCollectionConfig.md | 11 + typesense_codegen/src/apis/analytics_api.rs | 416 +- typesense_codegen/src/apis/collections_api.rs | 721 ++- typesense_codegen/src/apis/configuration.rs | 10 +- .../src/apis/conversations_api.rs | 280 ++ typesense_codegen/src/apis/curation_api.rs | 178 + typesense_codegen/src/apis/debug_api.rs | 76 +- typesense_codegen/src/apis/documents_api.rs | 2067 +++++---- typesense_codegen/src/apis/health_api.rs | 76 +- typesense_codegen/src/apis/keys_api.rs | 319 +- typesense_codegen/src/apis/mod.rs | 44 +- typesense_codegen/src/apis/operations_api.rs | 297 +- typesense_codegen/src/apis/override_api.rs | 87 +- typesense_codegen/src/apis/presets_api.rs | 229 + typesense_codegen/src/apis/promote_api.rs | 192 - typesense_codegen/src/apis/stemming_api.rs | 177 + typesense_codegen/src/apis/stopwords_api.rs | 229 + typesense_codegen/src/apis/synonyms_api.rs | 233 + typesense_codegen/src/lib.rs | 10 +- .../models/analytics_event_create_response.rs | 27 + .../models/analytics_event_create_schema.rs | 33 + .../models/analytics_rule_delete_response.rs | 27 + .../src/models/analytics_rule_parameters.rs | 31 +- .../analytics_rule_parameters_destination.rs | 21 +- .../analytics_rule_parameters_source.rs | 21 +- ...ics_rule_parameters_source_events_inner.rs | 33 + .../src/models/analytics_rule_schema.rs | 42 +- .../models/analytics_rule_upsert_schema.rs | 46 + .../models/analytics_rules_retrieve_schema.rs | 14 +- typesense_codegen/src/models/api_key.rs | 22 +- .../src/models/api_key_delete_response.rs | 28 + .../src/models/api_key_schema.rs | 26 +- .../src/models/api_keys_response.rs | 16 +- typesense_codegen/src/models/api_response.rs | 12 +- .../src/models/api_stats_response.rs | 63 + .../src/models/collection_alias.rs | 18 +- .../src/models/collection_alias_schema.rs | 12 +- .../src/models/collection_aliases_response.rs | 16 +- .../src/models/collection_response.rs | 62 +- .../src/models/collection_schema.rs | 49 +- .../src/models/collection_update_schema.rs | 16 +- .../conversation_model_create_schema.rs | 60 + .../src/models/conversation_model_schema.rs | 60 + .../conversation_model_update_schema.rs | 60 + .../src/models/debug_200_response.rs | 12 +- .../models/delete_documents_200_response.rs | 12 
+- ...s_delete_documents_parameters_parameter.rs | 27 - .../delete_stopwords_set_200_response.rs | 27 + typesense_codegen/src/models/dirty_values.rs | 44 + .../src/models/document_index_parameters.rs | 27 + .../src/models/drop_tokens_mode.rs | 42 + .../src/models/error_response.rs | 12 +- ...s_export_documents_parameters_parameter.rs | 35 - typesense_codegen/src/models/facet_counts.rs | 12 +- .../src/models/facet_counts_counts_inner.rs | 11 +- .../src/models/facet_counts_stats.rs | 14 +- typesense_codegen/src/models/field.rs | 82 +- typesense_codegen/src/models/field_embed.rs | 15 +- .../src/models/field_embed_model_config.rs | 32 +- typesense_codegen/src/models/health_status.rs | 12 +- ...s_import_documents_parameters_parameter.rs | 53 - typesense_codegen/src/models/index_action.rs | 44 + ...list_stemming_dictionaries_200_response.rs | 27 + typesense_codegen/src/models/mod.rs | 78 +- .../multi_search_collection_parameters.rs | 392 +- .../src/models/multi_search_parameters.rs | 377 +- .../src/models/multi_search_result.rs | 23 +- .../src/models/multi_search_result_item.rs | 71 + .../models/multi_search_searches_parameter.rs | 22 +- .../src/models/preset_delete_schema.rs | 27 + typesense_codegen/src/models/preset_schema.rs | 30 + .../src/models/preset_upsert_schema.rs | 27 + .../src/models/preset_upsert_schema_value.rs | 26 + .../src/models/presets_retrieve_schema.rs | 27 + .../src/models/schema_change_status.rs | 36 + .../src/models/scoped_key_parameters.rs | 14 +- .../src/models/search_grouped_hit.rs | 21 +- .../src/models/search_highlight.rs | 22 +- .../src/models/search_override.rs | 63 +- .../models/search_override_delete_response.rs | 28 + .../src/models/search_override_exclude.rs | 12 +- .../src/models/search_override_include.rs | 13 +- .../src/models/search_override_rule.rs | 36 +- .../src/models/search_override_schema.rs | 63 +- .../src/models/search_overrides_response.rs | 16 +- .../src/models/search_parameters.rs | 410 +- typesense_codegen/src/models/search_result.rs | 56 +- .../src/models/search_result_conversation.rs | 36 + .../src/models/search_result_hit.rs | 48 +- .../search_result_hit_text_match_info.rs | 45 + .../models/search_result_request_params.rs | 19 +- ...earch_result_request_params_voice_query.rs | 27 + .../src/models/search_synonym.rs | 16 +- .../models/search_synonym_delete_response.rs | 28 + .../src/models/search_synonym_schema.rs | 16 +- .../src/models/search_synonyms_response.rs | 16 +- .../src/models/snapshot_parameters.rs | 8 +- .../src/models/stemming_dictionary.rs | 32 + .../models/stemming_dictionary_words_inner.rs | 32 + .../models/stopwords_set_retrieve_schema.rs | 27 + .../src/models/stopwords_set_schema.rs | 33 + .../src/models/stopwords_set_upsert_schema.rs | 30 + .../stopwords_sets_retrieve_all_schema.rs | 27 + .../src/models/success_status.rs | 12 +- .../models/update_documents_200_response.rs | 12 +- ...s_update_documents_parameters_parameter.rs | 21 - .../voice_query_model_collection_config.rs | 29 + xtask/Cargo.toml | 9 + xtask/src/main.rs | 124 + 210 files changed, 12530 insertions(+), 6020 deletions(-) create mode 100644 .cargo/config.toml delete mode 100644 openapi.yml create mode 100644 typesense-go-unwrapped-api-spec.yaml create mode 100644 typesense/src/client/collections.rs create mode 100644 typesense/src/client/documents.rs create mode 100644 typesense/src/client/mod.rs rename typesense_codegen/docs/{UpdateDocumentsUpdateDocumentsParametersParameter.md => AnalyticsEventCreateResponse.md} (72%) rename 
typesense_codegen/docs/{ImportDocumentsImportDocumentsParametersParameter.md => AnalyticsEventCreateSchema.md} (50%) create mode 100644 typesense_codegen/docs/AnalyticsRuleDeleteResponse.md create mode 100644 typesense_codegen/docs/AnalyticsRuleParametersSourceEventsInner.md create mode 100644 typesense_codegen/docs/AnalyticsRuleUpsertSchema.md create mode 100644 typesense_codegen/docs/ApiKeyDeleteResponse.md create mode 100644 typesense_codegen/docs/ApiStatsResponse.md create mode 100644 typesense_codegen/docs/ConversationModelCreateSchema.md create mode 100644 typesense_codegen/docs/ConversationModelSchema.md create mode 100644 typesense_codegen/docs/ConversationModelUpdateSchema.md create mode 100644 typesense_codegen/docs/ConversationsApi.md rename typesense_codegen/docs/{PromoteApi.md => CurationApi.md} (71%) delete mode 100644 typesense_codegen/docs/DeleteDocumentsDeleteDocumentsParametersParameter.md create mode 100644 typesense_codegen/docs/DeleteStopwordsSet200Response.md create mode 100644 typesense_codegen/docs/DirtyValues.md create mode 100644 typesense_codegen/docs/DocumentIndexParameters.md create mode 100644 typesense_codegen/docs/DropTokensMode.md delete mode 100644 typesense_codegen/docs/ExportDocumentsExportDocumentsParametersParameter.md create mode 100644 typesense_codegen/docs/IndexAction.md create mode 100644 typesense_codegen/docs/ListStemmingDictionaries200Response.md create mode 100644 typesense_codegen/docs/MultiSearchResultItem.md create mode 100644 typesense_codegen/docs/PresetDeleteSchema.md create mode 100644 typesense_codegen/docs/PresetSchema.md create mode 100644 typesense_codegen/docs/PresetUpsertSchema.md create mode 100644 typesense_codegen/docs/PresetUpsertSchemaValue.md create mode 100644 typesense_codegen/docs/PresetsApi.md create mode 100644 typesense_codegen/docs/PresetsRetrieveSchema.md create mode 100644 typesense_codegen/docs/SchemaChangeStatus.md create mode 100644 typesense_codegen/docs/SearchOverrideDeleteResponse.md create mode 100644 typesense_codegen/docs/SearchResultConversation.md create mode 100644 typesense_codegen/docs/SearchResultHitTextMatchInfo.md create mode 100644 typesense_codegen/docs/SearchResultRequestParamsVoiceQuery.md create mode 100644 typesense_codegen/docs/SearchSynonymDeleteResponse.md create mode 100644 typesense_codegen/docs/StemmingApi.md create mode 100644 typesense_codegen/docs/StemmingDictionary.md create mode 100644 typesense_codegen/docs/StemmingDictionaryWordsInner.md create mode 100644 typesense_codegen/docs/StopwordsApi.md create mode 100644 typesense_codegen/docs/StopwordsSetRetrieveSchema.md create mode 100644 typesense_codegen/docs/StopwordsSetSchema.md create mode 100644 typesense_codegen/docs/StopwordsSetUpsertSchema.md create mode 100644 typesense_codegen/docs/StopwordsSetsRetrieveAllSchema.md create mode 100644 typesense_codegen/docs/SynonymsApi.md create mode 100644 typesense_codegen/docs/VoiceQueryModelCollectionConfig.md create mode 100644 typesense_codegen/src/apis/conversations_api.rs create mode 100644 typesense_codegen/src/apis/curation_api.rs create mode 100644 typesense_codegen/src/apis/presets_api.rs delete mode 100644 typesense_codegen/src/apis/promote_api.rs create mode 100644 typesense_codegen/src/apis/stemming_api.rs create mode 100644 typesense_codegen/src/apis/stopwords_api.rs create mode 100644 typesense_codegen/src/apis/synonyms_api.rs create mode 100644 typesense_codegen/src/models/analytics_event_create_response.rs create mode 100644 
typesense_codegen/src/models/analytics_event_create_schema.rs create mode 100644 typesense_codegen/src/models/analytics_rule_delete_response.rs create mode 100644 typesense_codegen/src/models/analytics_rule_parameters_source_events_inner.rs create mode 100644 typesense_codegen/src/models/analytics_rule_upsert_schema.rs create mode 100644 typesense_codegen/src/models/api_key_delete_response.rs create mode 100644 typesense_codegen/src/models/api_stats_response.rs create mode 100644 typesense_codegen/src/models/conversation_model_create_schema.rs create mode 100644 typesense_codegen/src/models/conversation_model_schema.rs create mode 100644 typesense_codegen/src/models/conversation_model_update_schema.rs delete mode 100644 typesense_codegen/src/models/delete_documents_delete_documents_parameters_parameter.rs create mode 100644 typesense_codegen/src/models/delete_stopwords_set_200_response.rs create mode 100644 typesense_codegen/src/models/dirty_values.rs create mode 100644 typesense_codegen/src/models/document_index_parameters.rs create mode 100644 typesense_codegen/src/models/drop_tokens_mode.rs delete mode 100644 typesense_codegen/src/models/export_documents_export_documents_parameters_parameter.rs delete mode 100644 typesense_codegen/src/models/import_documents_import_documents_parameters_parameter.rs create mode 100644 typesense_codegen/src/models/index_action.rs create mode 100644 typesense_codegen/src/models/list_stemming_dictionaries_200_response.rs create mode 100644 typesense_codegen/src/models/multi_search_result_item.rs create mode 100644 typesense_codegen/src/models/preset_delete_schema.rs create mode 100644 typesense_codegen/src/models/preset_schema.rs create mode 100644 typesense_codegen/src/models/preset_upsert_schema.rs create mode 100644 typesense_codegen/src/models/preset_upsert_schema_value.rs create mode 100644 typesense_codegen/src/models/presets_retrieve_schema.rs create mode 100644 typesense_codegen/src/models/schema_change_status.rs create mode 100644 typesense_codegen/src/models/search_override_delete_response.rs create mode 100644 typesense_codegen/src/models/search_result_conversation.rs create mode 100644 typesense_codegen/src/models/search_result_hit_text_match_info.rs create mode 100644 typesense_codegen/src/models/search_result_request_params_voice_query.rs create mode 100644 typesense_codegen/src/models/search_synonym_delete_response.rs create mode 100644 typesense_codegen/src/models/stemming_dictionary.rs create mode 100644 typesense_codegen/src/models/stemming_dictionary_words_inner.rs create mode 100644 typesense_codegen/src/models/stopwords_set_retrieve_schema.rs create mode 100644 typesense_codegen/src/models/stopwords_set_schema.rs create mode 100644 typesense_codegen/src/models/stopwords_set_upsert_schema.rs create mode 100644 typesense_codegen/src/models/stopwords_sets_retrieve_all_schema.rs delete mode 100644 typesense_codegen/src/models/update_documents_update_documents_parameters_parameter.rs create mode 100644 typesense_codegen/src/models/voice_query_model_collection_config.rs create mode 100644 xtask/Cargo.toml create mode 100644 xtask/src/main.rs diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..f0ccbc9 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,2 @@ +[alias] +xtask = "run --package xtask --" \ No newline at end of file diff --git a/.gitignore b/.gitignore index e551aa3..3549fae 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,3 @@ /target Cargo.lock -.env +.env \ No newline at end of file diff 
--git a/Cargo.toml b/Cargo.toml index dd9279f..322f339 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,9 @@ + [workspace] members = [ "typesense", "typesense_derive", - "typesense_codegen" + "typesense_codegen", + "xtask", ] + diff --git a/openapi.yml b/openapi.yml deleted file mode 100644 index 3bac4f6..0000000 --- a/openapi.yml +++ /dev/null @@ -1,2541 +0,0 @@ -openapi: 3.0.3 -info: - title: Typesense API - description: "An open source search engine for building delightful search experiences." - version: 0.25.0 -externalDocs: - description: Find out more about Typsesense - url: https://typesense.org -security: - - api_key_header: [] -tags: - - name: collections - description: A collection is defined by a schema - externalDocs: - description: Find out more - url: https://typesense.org/api/#create-collection - - name: documents - description: A document is an individual record to be indexed and belongs to a collection - externalDocs: - description: Find out more - url: https://typesense.org/api/#index-document - - name: promote - description: Promote certain documents over others - externalDocs: - description: Find out more - url: https://typesense.org/docs/0.23.0/api/#curation - - name: analytics - description: Typesense can aggregate search queries for both analytics purposes and for query suggestions. - externalDocs: - description: Find out more - url: https://typesense.org/docs/0.25.0/api/analytics-query-suggestions.html - - name: keys - description: Manage API Keys with fine-grain access control - externalDocs: - description: Find out more - url: https://typesense.org/docs/0.23.0/api/#api-keys - - name: debug - description: Debugging information - - name: operations - description: Manage Typesense cluster - externalDocs: - description: Find out more - url: https://typesense.org/docs/0.23.0/api/#cluster-operations - -paths: - /collections: - get: - tags: - - collections - summary: List all collections - description: - Returns a summary of all your collections. The collections are - returned sorted by creation date, with the most recent collections appearing - first. - operationId: getCollections - responses: - 200: - description: List of all collections - content: - application/json: - schema: - type: array - x-go-type: "[]*CollectionResponse" - items: - $ref: "#/components/schemas/CollectionResponse" - post: - tags: - - collections - summary: Create a new collection - description: - When a collection is created, we give it a name and describe the - fields that will be indexed from the documents added to the collection. - operationId: createCollection - requestBody: - description: The collection object to be created - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionSchema" - required: true - responses: - 201: - description: Collection successfully created - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionResponse" - 400: - description: Bad request, see error message for details - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - 409: - description: Collection already exists - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /collections/{collectionName}: - get: - tags: - - collections - summary: Retrieve a single collection - description: Retrieve the details of a collection, given its name. 
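
As an aside on the collection endpoints described above, a minimal sketch of createCollection over plain HTTP follows. Nothing in it is part of this patch: the base URL http://localhost:8108, the API key "xyz", the X-TYPESENSE-API-KEY header (assumed here for the spec's api_key_header scheme) and the reqwest/tokio/serde_json crates are all assumptions made for illustration; the generated typesense_codegen client wraps the same endpoint.

    use serde_json::json;

    #[tokio::main]
    async fn main() -> Result<(), reqwest::Error> {
        // CollectionSchema body, mirroring the example fields in the spec above.
        let schema = json!({
            "name": "companies",
            "fields": [
                { "name": "company_name",  "type": "string", "facet": false },
                { "name": "num_employees", "type": "int32",  "facet": false },
                { "name": "country",       "type": "string", "facet": true  }
            ],
            "default_sorting_field": "num_employees"
        });

        let res = reqwest::Client::new()
            .post("http://localhost:8108/collections")
            .header("X-TYPESENSE-API-KEY", "xyz") // assumed header for the api_key_header scheme
            .json(&schema)
            .send()
            .await?;

        // 201 => CollectionResponse, 400 => bad request, 409 => collection already exists.
        let status = res.status();
        let body = res.text().await?;
        println!("{status}: {body}");
        Ok(())
    }
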
- operationId: getCollection - parameters: - - name: collectionName - in: path - description: The name of the collection to retrieve - required: true - schema: - type: string - responses: - 200: - description: Collection fetched - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionResponse" - 404: - description: Collection not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - patch: - tags: - - collections - summary: Update a collection - description: - Update a collection's schema to modify the fields and their types. - operationId: updateCollection - parameters: - - name: collectionName - in: path - description: The name of the collection to update - required: true - schema: - type: string - requestBody: - description: The document object with fields to be updated - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionUpdateSchema" - required: true - responses: - 200: - description: The updated partial collection schema - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionUpdateSchema" - 400: - description: Bad request, see error message for details - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - 404: - description: The collection was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - delete: - tags: - - collections - summary: Delete a collection - description: - Permanently drops a collection. This action cannot be undone. For - large collections, this might have an impact on read latencies. - operationId: deleteCollection - parameters: - - name: collectionName - in: path - description: The name of the collection to delete - required: true - schema: - type: string - responses: - 200: - description: Collection deleted - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionResponse" - 404: - description: Collection not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /collections/{collectionName}/documents: - post: - tags: - - documents - summary: Index a document - description: - A document to be indexed in a given collection must conform to - the schema of the collection. - operationId: indexDocument - parameters: - - name: collectionName - in: path - description: The name of the collection to add the document to - required: true - schema: - type: string - - name: action - in: query - description: Additional action to perform - schema: - type: string - example: upsert - enum: - - upsert - requestBody: - description: The document object to be indexed - content: - application/json: - schema: - type: object - description: Can be any key-value pair - x-go-type: "interface{}" - required: true - responses: - 201: - description: Document successfully created/indexed - content: - application/json: - schema: - type: object - description: Can be any key-value pair - 404: - description: Collection not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - patch: - tags: - - documents - summary: Update documents with conditional query - description: - The filter_by query parameter is used to filter to specify a condition against which the documents are matched. - The request body contains the fields that should be updated for any documents that match the filter condition. - This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later. 
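
Under the same assumptions as the collection sketch above, indexing (or upserting, via the action query parameter) a single document looks roughly like this; it is shown as a helper function so the runtime boilerplate is not repeated.

    use serde_json::json;

    async fn upsert_document(http: &reqwest::Client) -> Result<(), reqwest::Error> {
        // Document shape taken from the spec's own examples; any JSON object is accepted.
        let document = json!({
            "id": "124",
            "company_name": "Stark Industries",
            "num_employees": 5215,
            "country": "USA"
        });

        let res = http
            .post("http://localhost:8108/collections/companies/documents")
            .query(&[("action", "upsert")]) // optional "Additional action to perform"
            .header("X-TYPESENSE-API-KEY", "xyz")
            .json(&document)
            .send()
            .await?;

        // 201 echoes the indexed document; 404 means the collection does not exist.
        println!("{}", res.text().await?);
        Ok(())
    }
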
- operationId: updateDocuments - parameters: - - name: collectionName - in: path - description: The name of the collection to update documents in - required: true - schema: - type: string - - name: updateDocumentsParameters - in: query - schema: - type: object - properties: - filter_by: - type: string - example: "num_employees:>100 && country: [USA, UK]" - responses: - '200': - description: - The response contains a single field, `num_updated`, indicating the number of documents affected. - content: - application/json: - schema: - type: object - required: - - num_updated - properties: - num_updated: - type: integer - description: The number of documents that have been updated - example: 1 - '400': - description: 'Bad request, see error message for details' - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - '404': - description: The collection was not found - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - requestBody: - description: The document fields to be updated - content: - application/json: - schema: - type: object - description: Can be any key-value pair - x-go-type: "interface{}" - required: true - delete: - tags: - - documents - summary: Delete a bunch of documents - description: - Delete a bunch of documents that match a specific filter condition. - Use the `batch_size` parameter to control the number of documents that - should deleted at a time. A larger value will speed up deletions, but will - impact performance of other operations running on the server. - operationId: deleteDocuments - parameters: - - name: collectionName - in: path - description: The name of the collection to delete documents from - required: true - schema: - type: string - - name: deleteDocumentsParameters - in: query - schema: - type: object - properties: - filter_by: - type: string - example: "num_employees:>100 && country: [USA, UK]" - batch_size: - description: - Batch size parameter controls the number of documents that should be deleted - at a time. A larger value will speed up deletions, but will impact performance - of other operations running on the server. - type: integer - responses: - 200: - description: Documents successfully deleted - content: - application/json: - schema: - type: object - required: - - num_deleted - properties: - num_deleted: - type: integer - 404: - description: Collection not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /collections/{collectionName}/documents/search: - get: - tags: - - documents - summary: Search for documents in a collection - description: Search for documents in a collection that match the search criteria. 
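
The filter-based updateDocuments operation above can be sketched the same way; the response is the single-field num_updated object documented for it (same assumptions as before).

    use serde_json::json;

    async fn update_matching_documents(http: &reqwest::Client) -> Result<(), reqwest::Error> {
        let res = http
            .patch("http://localhost:8108/collections/companies/documents")
            .query(&[("filter_by", "num_employees:>100 && country: [USA, UK]")])
            .header("X-TYPESENSE-API-KEY", "xyz")
            .json(&json!({ "country": "US" })) // fields applied to every matching document
            .send()
            .await?;

        // Response body: {"num_updated": <count>}
        let body: serde_json::Value = res.json().await?;
        println!("num_updated = {}", body["num_updated"]);
        Ok(())
    }
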
- operationId: searchCollection - parameters: - - name: collectionName - in: path - description: The name of the collection to search for the document under - required: true - schema: - type: string - - name: searchParameters - required: true - in: query - schema: - $ref: "#/components/schemas/SearchParameters" - responses: - 200: - description: Search results - content: - application/json: - schema: - $ref: "#/components/schemas/SearchResult" - 400: - description: Bad request, see error message for details - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - 404: - description: The collection or field was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /collections/{collectionName}/overrides: - get: - tags: - - documents - - promote - summary: List all collection overrides - operationId: getSearchOverrides - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - responses: - 200: - description: List of all search overrides - content: - application/json: - schema: - $ref: "#/components/schemas/SearchOverridesResponse" - /collections/{collectionName}/overrides/{overrideId}: - get: - tags: - - documents - - override - summary: Retrieve a single search override - description: Retrieve the details of a search override, given its id. - operationId: getSearchOverride - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - - name: overrideId - in: path - description: The id of the search override - required: true - schema: - type: string - responses: - 200: - description: Search override fetched - content: - application/json: - schema: - $ref: "#/components/schemas/SearchOverride" - put: - tags: - - documents - - promote - summary: Create or update an override to promote certain documents over others - description: - Create or update an override to promote certain documents over others. - Using overrides, you can include or exclude specific documents for a given query. 
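
A hedged sketch of the searchCollection operation above, passing SearchParameters as URL query parameters; the parameter values are illustrative, not taken from the patch.

    async fn search_companies(http: &reqwest::Client) -> Result<(), reqwest::Error> {
        let res = http
            .get("http://localhost:8108/collections/companies/documents/search")
            .query(&[
                ("q", "stark"),
                ("query_by", "company_name"),
                ("filter_by", "num_employees:>100"),
                ("sort_by", "num_employees:desc"),
            ])
            .header("X-TYPESENSE-API-KEY", "xyz")
            .send()
            .await?;

        // 200 => SearchResult (found, out_of, hits, facet_counts, ...).
        let result: serde_json::Value = res.json().await?;
        println!("found {} out of {}", result["found"], result["out_of"]);
        Ok(())
    }
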
- operationId: upsertSearchOverride - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - - name: overrideId - in: path - description: The ID of the search override to create/update - required: true - schema: - type: string - requestBody: - description: The search override object to be created/updated - content: - application/json: - schema: - $ref: "#/components/schemas/SearchOverrideSchema" - required: true - responses: - 200: - description: Created/updated search override - content: - application/json: - schema: - $ref: "#/components/schemas/SearchOverride" - 404: - description: Search override not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - delete: - tags: - - documents - - promote - summary: Delete an override associated with a collection - operationId: deleteSearchOverride - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - - name: overrideId - in: path - description: The ID of the search override to delete - required: true - schema: - type: string - responses: - 200: - description: The ID of the deleted search override - content: - application/json: - schema: - $ref: "#/components/schemas/SearchOverride" - 404: - description: Search override not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /collections/{collectionName}/synonyms: - get: - tags: - - documents - summary: List all collection synonyms - operationId: getSearchSynonyms - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - responses: - 200: - description: List of all search synonyms - content: - application/json: - schema: - $ref: "#/components/schemas/SearchSynonymsResponse" - 404: - description: Search synonyms was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /collections/{collectionName}/synonyms/{synonymId}: - get: - tags: - - documents - summary: Retrieve a single search synonym - description: Retrieve the details of a search synonym, given its id. - operationId: getSearchSynonym - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - - name: synonymId - in: path - description: The id of the search synonym - required: true - schema: - type: string - responses: - 200: - description: Search synonym fetched - content: - application/json: - schema: - $ref: "#/components/schemas/SearchSynonym" - 404: - description: Search synonym was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - put: - tags: - - documents - summary: Create or update a synonym - description: Create or update a synonym to define search terms that should be considered equivalent. 
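
The override upsert above takes a SearchOverrideSchema body (rule, includes, excludes, remove_matched_tokens; the schema itself is defined further down in this file). The override id customize-apple and the document ids below are made up for the example; same assumptions as the earlier sketches.

    use serde_json::json;

    async fn upsert_override(http: &reqwest::Client) -> Result<(), reqwest::Error> {
        let body = json!({
            "rule": { "query": "apple", "match": "exact" },
            "includes": [ { "id": "422", "position": 1 } ], // pin document 422 to position 1
            "excludes": [ { "id": "287" } ],                 // hide document 287
            "remove_matched_tokens": true
        });

        let res = http
            .put("http://localhost:8108/collections/companies/overrides/customize-apple")
            .header("X-TYPESENSE-API-KEY", "xyz")
            .json(&body)
            .send()
            .await?;

        // 200 => the created/updated SearchOverride (the schema plus its id).
        println!("{}", res.text().await?);
        Ok(())
    }
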
- operationId: upsertSearchSynonym - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - - name: synonymId - in: path - description: The ID of the search synonym to create/update - required: true - schema: - type: string - requestBody: - description: The search synonym object to be created/updated - content: - application/json: - schema: - $ref: "#/components/schemas/SearchSynonymSchema" - required: true - responses: - 200: - description: Created/updated search synonym - content: - application/json: - schema: - $ref: "#/components/schemas/SearchSynonym" - 404: - description: Search synonym was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - delete: - tags: - - documents - summary: Delete a synonym associated with a collection - operationId: deleteSearchSynonym - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - - name: synonymId - in: path - description: The ID of the search synonym to delete - required: true - schema: - type: string - responses: - 200: - description: The ID of the deleted search synonym - content: - application/json: - schema: - $ref: "#/components/schemas/SearchSynonym" - 404: - description: Search synonym not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - - /collections/{collectionName}/documents/export: - get: - tags: - - documents - summary: Export all documents in a collection - description: Export all documents in a collection in JSON lines format. - operationId: exportDocuments - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - - name: exportDocumentsParameters - in: query - schema: - type: object - required: - - include_fields - - exclude_fields - properties: - filter_by: - description: - Filter conditions for refining your search results. Separate - multiple conditions with &&. - type: string - include_fields: - description: List of fields from the document to include in the search result - type: string - exclude_fields: - description: List of fields from the document to exclude in the search result - type: string - - responses: - 200: - description: Exports all the documents in a given collection. - content: - application/octet-stream: - schema: - type: string - example: | - {"id": "124", "company_name": "Stark Industries", "num_employees": 5215, "country": "US"} - {"id": "125", "company_name": "Future Technology", "num_employees": 1232,"country": "UK"} - {"id": "126", "company_name": "Random Corp.", "num_employees": 531,"country": "AU"} - 404: - description: The collection was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /collections/{collectionName}/documents/import: - post: - tags: - - documents - summary: Import documents into a collection - description: - The documents to be imported must be formatted in a newline delimited - JSON structure. You can feed the output file from a Typesense export operation - directly as import. 
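
The synonym endpoints above follow the same pattern; the synonym id and the word list here are invented for illustration.

    use serde_json::json;

    async fn upsert_synonym(http: &reqwest::Client) -> Result<(), reqwest::Error> {
        // Multi-way synonym set; add a "root" field for a 1-way mapping instead.
        http.put("http://localhost:8108/collections/companies/synonyms/coat-synonyms")
            .header("X-TYPESENSE-API-KEY", "xyz")
            .json(&json!({ "synonyms": ["blazer", "coat", "jacket"] }))
            .send()
            .await?
            .error_for_status()?;
        Ok(())
    }
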
- operationId: importDocuments - parameters: - - name: collectionName - in: path - description: The name of the collection - required: true - schema: - type: string - - name: importDocumentsParameters - in: query - schema: - type: object - properties: - action: - type: string - batch_size: - type: integer - dirty_values: - type: string - enum: - - coerce_or_reject - - coerce_or_drop - - drop - - reject - remote_embedding_batch_size: - type: integer - requestBody: - description: The json array of documents or the JSONL file to import - content: - application/octet-stream: - schema: - type: string - description: The JSONL file to import - required: true - responses: - 200: - description: - Result of the import operation. Each line of the response indicates the result - of each document present in the request body (in the same order). If the import - of a single document fails, it does not affect the other documents. - If there is a failure, the response line will include a corresponding error - message and as well as the actual document content. - content: - application/octet-stream: - schema: - type: string - example: | - {"success": true} - {"success": false, "error": "Bad JSON.", "document": "[bad doc"} - 400: - description: Bad request, see error message for details - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - 404: - description: The collection was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /collections/{collectionName}/documents/{documentId}: - get: - tags: - - documents - summary: Retreive a document - description: Fetch an individual document from a collection by using its ID. - operationId: getDocument - parameters: - - name: collectionName - in: path - description: The name of the collection to search for the document under - required: true - schema: - type: string - - name: documentId - in: path - description: The Document ID - required: true - schema: - type: string - responses: - 200: - description: The document referenced by the ID - content: - application/json: - schema: - type: object - description: Can be any key-value pair - 404: - description: The document or collection was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - patch: - tags: - - documents - summary: Update a document - description: - Update an individual document from a collection by using its ID. - The update can be partial. - operationId: updateDocument - parameters: - - name: collectionName - in: path - description: The name of the collection to search for the document under - required: true - schema: - type: string - - name: documentId - in: path - description: The Document ID - required: true - schema: - type: string - requestBody: - description: The document object with fields to be updated - content: - application/json: - schema: - type: object - description: Can be any key-value pair - x-go-type: "interface{}" - required: true - responses: - 200: - description: The document referenced by the ID was updated - content: - application/json: - schema: - type: object - description: Can be any key-value pair - 404: - description: The document or collection was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - delete: - tags: - - documents - summary: Delete a document - description: Delete an individual document from a collection by using its ID. 
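
importDocuments, described above, takes a newline-delimited JSON body and answers with one JSON result object per input line. A sketch under the same assumptions; the batch_size value is arbitrary.

    async fn import_documents(http: &reqwest::Client) -> Result<(), reqwest::Error> {
        // Two JSONL lines, mirroring the export example in the spec above.
        let jsonl = concat!(
            r#"{"id": "125", "company_name": "Future Technology", "num_employees": 1232, "country": "UK"}"#, "\n",
            r#"{"id": "126", "company_name": "Random Corp.", "num_employees": 531, "country": "AU"}"#
        );

        let res = http
            .post("http://localhost:8108/collections/companies/documents/import")
            .query(&[("action", "upsert"), ("batch_size", "40")])
            .header("X-TYPESENSE-API-KEY", "xyz")
            .body(jsonl)
            .send()
            .await?;

        // One result per input line, in order, e.g. {"success": true}
        // or {"success": false, "error": "...", "document": "..."}.
        let report = res.text().await?;
        for line in report.lines() {
            println!("{line}");
        }
        Ok(())
    }
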
- operationId: deleteDocument - parameters: - - name: collectionName - in: path - description: The name of the collection to search for the document under - required: true - schema: - type: string - - name: documentId - in: path - description: The Document ID - required: true - schema: - type: string - responses: - 200: - description: The document referenced by the ID was deleted - content: - application/json: - schema: - type: object - description: Can be any key-value pair - 404: - description: The document or collection was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /keys: - get: - tags: - - keys - summary: Retrieve (metadata about) all keys. - operationId: getKeys - responses: - 200: - description: List of all keys - content: - application/json: - schema: - $ref: "#/components/schemas/ApiKeysResponse" - post: - tags: - - keys - summary: Create an API Key - description: - Create an API Key with fine-grain access control. You can restrict access - on both a per-collection and per-action level. - The generated key is returned only during creation. You want to store - this key carefully in a secure place. - operationId: createKey - requestBody: - description: The object that describes API key scope - content: - application/json: - schema: - $ref: "#/components/schemas/ApiKeySchema" - responses: - 201: - description: Created API key - content: - application/json: - schema: - $ref: "#/components/schemas/ApiKey" - 400: - description: Bad request, see error message for details - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - 409: - description: API key generation conflict - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /keys/{keyId}: - get: - tags: - - keys - summary: Retrieve (metadata about) a key - description: - Retrieve (metadata about) a key. Only the key prefix is returned - when you retrieve a key. Due to security reasons, only the create endpoint - returns the full API key. - operationId: getKey - parameters: - - name: keyId - in: path - description: The ID of the key to retrieve - required: true - schema: - type: integer - format: int64 - responses: - 200: - description: The key referenced by the ID - content: - application/json: - schema: - $ref: "#/components/schemas/ApiKey" - 404: - description: The key was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - delete: - tags: - - keys - summary: Delete an API key given its ID. - operationId: deleteKey - parameters: - - name: keyId - in: path - description: The ID of the key to delete - required: true - schema: - type: integer - format: int64 - responses: - 200: - description: The key referenced by the ID - content: - application/json: - schema: - $ref: "#/components/schemas/ApiKey" - 400: - description: Bad request, see error message for details - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - 404: - description: Key not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /aliases: - get: - tags: - - collections - summary: List all aliases - description: List all aliases and the corresponding collections that they map to. 
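
A sketch of the createKey operation above. The documents:search action string is the usual search-only scope and is an assumption of the example, not something this patch defines; note that the full key value is only available in this one response.

    use serde_json::json;

    async fn create_search_only_key(http: &reqwest::Client) -> Result<(), reqwest::Error> {
        let key_schema = json!({
            "description": "Search-only key for the companies collection",
            "actions": ["documents:search"],
            "collections": ["companies"]
        });

        let res = http
            .post("http://localhost:8108/keys")
            .header("X-TYPESENSE-API-KEY", "xyz") // must be an admin-capable key
            .json(&key_schema)
            .send()
            .await?;

        // Later GET /keys/{id} calls only return value_prefix, so persist this now.
        let key: serde_json::Value = res.json().await?;
        println!("store this value now: {}", key["value"]);
        Ok(())
    }
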
- operationId: getAliases - responses: - 200: - description: List of all collection aliases - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionAliasesResponse" - /aliases/{aliasName}: - put: - tags: - - collections - summary: Create or update a collection alias - description: - Create or update a collection alias. An alias is a virtual collection name that points - to a real collection. If you're familiar with symbolic links on Linux, it's very similar - to that. Aliases are useful when you want to reindex your data in the - background on a new collection and switch your application to it without any changes to - your code. - operationId: upsertAlias - parameters: - - name: aliasName - in: path - description: The name of the alias to create/update - required: true - schema: - type: string - requestBody: - description: Collection alias to be created/updated - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionAliasSchema" - responses: - 200: - description: The collection alias was created/updated - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionAlias" - 400: - description: Bad request, see error message for details - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - 404: - description: Alias not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - get: - tags: - - collections - summary: Retrieve an alias - description: Find out which collection an alias points to by fetching it - operationId: getAlias - parameters: - - name: aliasName - in: path - description: The name of the alias to retrieve - required: true - schema: - type: string - responses: - 200: - description: Collection alias fetched - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionAlias" - 404: - description: The alias was not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - delete: - tags: - - collections - summary: Delete an alias - operationId: deleteAlias - parameters: - - name: aliasName - in: path - description: The name of the alias to delete - required: true - schema: - type: string - responses: - 200: - description: Collection alias was deleted - content: - application/json: - schema: - $ref: "#/components/schemas/CollectionAlias" - 404: - description: Alias not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /debug: - get: - tags: - - debug - summary: Print debugging information - description: Print debugging information - operationId: debug - responses: - 200: - description: Debugging information - content: - application/json: - schema: - type: object - properties: - version: - type: string - /health: - get: - tags: - - health - summary: Checks if Typesense server is ready to accept requests. - description: Checks if Typesense server is ready to accept requests. - operationId: health - responses: - 200: - description: Search service is ready for requests. - content: - application/json: - schema: - $ref: "#/components/schemas/HealthStatus" - /operations/snapshot: - post: - tags: - - operations - summary: Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. - description: - Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. - You can then backup the snapshot directory that gets created and later restore it - as a data directory, as needed. 
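
The reindex-then-switch workflow described for aliases above comes down to one upsertAlias call once the new collection is ready; the collection name companies_2025_07_12 is invented for the example.

    use serde_json::json;

    async fn point_alias_at_new_collection(http: &reqwest::Client) -> Result<(), reqwest::Error> {
        // Repoint the alias the application queries, so no application code has to change.
        http.put("http://localhost:8108/aliases/companies")
            .header("X-TYPESENSE-API-KEY", "xyz")
            .json(&json!({ "collection_name": "companies_2025_07_12" }))
            .send()
            .await?
            .error_for_status()?;
        Ok(())
    }
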
- operationId: takeSnapshot - parameters: - - name: snapshot_path - in: query - description: The directory on the server where the snapshot should be saved. - required: true - schema: - type: string - responses: - 201: - description: Snapshot is created. - content: - application/json: - schema: - $ref: "#/components/schemas/SuccessStatus" - /operations/vote: - post: - tags: - - operations - summary: Triggers a follower node to initiate the raft voting process, which triggers leader re-election. - description: - Triggers a follower node to initiate the raft voting process, which triggers leader re-election. - The follower node that you run this operation against will become the new leader, - once this command succeeds. - operationId: vote - responses: - 200: - description: Re-election is performed. - content: - application/json: - schema: - $ref: "#/components/schemas/SuccessStatus" - /multi_search: - post: - operationId: multiSearch - tags: - - documents - summary: send multiple search requests in a single HTTP request - description: - This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. - You can also use this feature to do a federated search across multiple collections in a single HTTP request. - parameters: - - name: multiSearchParameters - required: true - in: query - schema: - $ref: "#/components/schemas/MultiSearchParameters" - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/MultiSearchSearchesParameter" - responses: - 200: - description: Search results - content: - application/json: - schema: - $ref: "#/components/schemas/MultiSearchResult" - 400: - description: Bad request, see error message for details - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - /analytics/rules: - post: - tags: - - analytics - summary: Creates an analytics rule - description: - When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. 
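
A federated multiSearch sketch for the endpoint above; the second collection brands and its query_by field are hypothetical, and the per-search "collection" key comes from the MultiSearch schemas referenced here rather than from the visible excerpt.

    use serde_json::json;

    async fn federated_search(http: &reqwest::Client) -> Result<(), reqwest::Error> {
        let searches = json!({
            "searches": [
                { "collection": "companies", "q": "stark", "query_by": "company_name" },
                { "collection": "brands",    "q": "stark", "query_by": "name" }
            ]
        });

        let res = http
            .post("http://localhost:8108/multi_search")
            .header("X-TYPESENSE-API-KEY", "xyz")
            .json(&searches)
            .send()
            .await?;

        // MultiSearchResult: one entry in `results` per search in the request, in order.
        let body: serde_json::Value = res.json().await?;
        println!("{} result sets", body["results"].as_array().map_or(0, |r| r.len()));
        Ok(())
    }
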
- operationId: createAnalyticsRule - requestBody: - description: The Analytics rule to be created - content: - application/json: - schema: - $ref: "#/components/schemas/AnalyticsRuleSchema" - required: true - responses: - 201: - description: Analytics rule successfully created - content: - application/json: - schema: - $ref: "#/components/schemas/AnalyticsRuleSchema" - 400: - description: Bad request, see error message for details - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - get: - tags: - - analytics - summary: Retrieves all analytics rules - description: - Retrieve the details of all analytics rules - operationId: retrieveAnalyticsRules - responses: - 200: - description: Analytics rules fetched - content: - application/json: - schema: - $ref: "#/components/schemas/AnalyticsRulesRetrieveSchema" - /analytics/rules/{ruleName}: - get: - tags: - - analytics - summary: Retrieves an analytics rule - description: - Retrieve the details of an analytics rule, given it's name - operationId: retrieveAnalyticsRule - parameters: - - in: path - name: ruleName - description: The name of the analytics rule to retrieve - schema: - type: string - required: true - responses: - 200: - description: Analytics rule fetched - content: - application/json: - schema: - $ref: "#/components/schemas/AnalyticsRuleSchema" - 404: - description: Analytics rule not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" - delete: - tags: - - analytics - summary: Delete an analytics rule - description: - Permanently deletes an analytics rule, given it's name - operationId: deleteAnalyticsRule - parameters: - - in: path - name: ruleName - description: The name of the analytics rule to delete - schema: - type: string - required: true - responses: - 200: - description: Analytics rule deleted - content: - application/json: - schema: - $ref: "#/components/schemas/AnalyticsRuleSchema" - 404: - description: Analytics rule not found - content: - application/json: - schema: - $ref: "#/components/schemas/ApiResponse" -components: - schemas: - CollectionSchema: - required: - - name - - fields - type: object - properties: - name: - type: string - description: Name of the collection - example: companies - fields: - type: array - description: A list of fields for querying, filtering and faceting - example: - - name: num_employees - type: int32 - facet: false - - name: company_name - type: string - facet: false - - name: country - type: string - facet: true - items: - $ref: "#/components/schemas/Field" - default_sorting_field: - type: string - description: - The name of an int32 / float field that determines the order in which - the search results are ranked when a sort_by clause is not provided during - searching. This field must indicate some kind of popularity. - example: num_employees # Go with the first field name listed above to produce sane defaults - default: "" - token_separators: - type: array - description: > - List of symbols or special characters to be used for - splitting the text into individual words in addition to space and new-line characters. - items: - type: string # characters only - # Could `enum` be used instead, given it's symbols/special *characters*, e.g.: - # enum: ["@", "!", ".", "/", ","] - minLength: 1 - maxLength: 1 - default: [] - enable_nested_fields: - type: boolean - description: - Enables experimental support at a collection level for nested object or object array fields. 
- This field is only available if the Typesense server is version `0.24.0.rcn34` or later. - default: false - example: true - symbols_to_index: - type: array - description: > - List of symbols or special characters to be indexed. - items: - type: string # characters only - # Could `enum` be used instead, given it's symbols/special *characters*, e.g.: - # enum: ["@", "!", ".", "/", ","] - minLength: 1 - maxLength: 1 - default: [] - CollectionUpdateSchema: - required: - - fields - type: object - properties: - fields: - type: array - description: A list of fields for querying, filtering and faceting - example: - - name: company_name - type: string - facet: false - - name: num_employees - type: int32 - facet: false - - name: country - type: string - facet: true - items: - $ref: "#/components/schemas/Field" - CollectionResponse: - allOf: - - $ref: "#/components/schemas/CollectionSchema" - - type: object - required: - - num_documents - - created_at - properties: - num_documents: - type: integer - description: Number of documents in the collection - format: int64 - readOnly: true - created_at: - type: integer - description: Timestamp of when the collection was created (Unix epoch in seconds) - format: int64 - readOnly: true - Field: - required: - - name - - type - type: object - properties: - name: - type: string - example: company_name - type: - type: string - example: string - optional: - type: boolean - example: true - facet: - type: boolean - example: false - index: - type: boolean - example: true - default: true - locale: - type: string - example: el - sort: - type: boolean - example: true - infix: - type: boolean - example: true - default: false - num_dim: - type: integer - example: 256 - drop: - type: boolean - example: true - # omitting default value since we want it to be null - embed: - type: object - required: - - from - - model_config - properties: - from: - type: array - items: - type: string - model_config: - type: object - required: - - model_name - properties: - model_name: - type: string - api_key: - type: string - access_token: - type: string - client_id: - type: string - client_secret: - type: string - project_id: - type: string - CollectionAliasSchema: - type: object - required: - - collection_name - properties: - collection_name: - type: string - description: Name of the collection you wish to map the alias to - CollectionAlias: - type: object - required: - - collection_name - - name - properties: - name: - type: string - readOnly: true - description: Name of the collection alias - collection_name: - type: string - description: Name of the collection the alias mapped to - CollectionAliasesResponse: - type: object - required: - - aliases - properties: - aliases: - type: array - x-go-type: "[]*CollectionAlias" - items: - $ref: "#/components/schemas/CollectionAlias" - SearchResult: - type: object - properties: - facet_counts: - type: array - items: - $ref: "#/components/schemas/FacetCounts" - found: - type: integer - description: The number of documents found - search_time_ms: - type: integer - description: The number of milliseconds the search took - out_of: - type: integer - description: The total number of documents in the collection - search_cutoff: - type: boolean - description: Whether the search was cut off - page: - type: integer - description: The search result page number - grouped_hits: - type: array - items: - $ref: "#/components/schemas/SearchGroupedHit" - hits: - type: array - description: The documents that matched the search query - items: - $ref: 
"#/components/schemas/SearchResultHit" - request_params: - type: object - required: - - collection_name - - q - - per_page - properties: - collection_name: - type: string - q: - type: string - per_page: - type: integer - - SearchGroupedHit: - type: object - required: - - group_key - - hits - properties: - found: - type: integer - group_key: - type: array - items: {} - hits: - type: array - description: The documents that matched the search query - items: - $ref: "#/components/schemas/SearchResultHit" - SearchResultHit: - type: object - properties: - highlights: - type: array - description: (Deprecated) Contains highlighted portions of the search fields - items: - $ref: "#/components/schemas/SearchHighlight" - highlight: - type: object - description: Highlighted version of the matching document - additionalProperties: true - document: - type: object - description: Can be any key-value pair - additionalProperties: - type: object - text_match: - type: integer - format: int64 - geo_distance_meters: - type: object - description: Can be any key-value pair - additionalProperties: - type: integer - vector_distance: - type: number - format: float - description: Distance between the query vector and matching document's vector value - example: - highlights: - company_name: - field: company_name - snippet: Stark Industries - document: - id: "124" - company_name: Stark Industries - num_employees: 5215 - country: USA - text_match: 1234556 - SearchHighlight: - type: object - properties: - field: - type: string - example: company_name - snippet: - type: string - description: Present only for (non-array) string fields - example: Stark Industries - snippets: - type: array - description: Present only for (array) string[] fields - example: - - Stark Industries - - Stark Corp - items: - type: string - value: - type: string - description: Full field value with highlighting, present only for (non-array) string fields - example: Stark Industries is a major supplier of space equipment. - values: - type: array - description: Full field value with highlighting, present only for (array) string[] fields - example: - - Stark Industries - - Stark Corp - items: - type: string - indices: - type: array - description: The indices property will be present only for string[] - fields and will contain the corresponding indices of the snippets - in the search field - example: 1 - items: - type: integer - matched_tokens: - type: array - items: - type: object - x-go-type: "interface{}" - SearchOverrideSchema: - type: object - required: - - rule - properties: - rule: - $ref: "#/components/schemas/SearchOverrideRule" - includes: - type: array - description: - List of document `id`s that should be included in the search results with their - corresponding `position`s. - items: - $ref: "#/components/schemas/SearchOverrideInclude" - excludes: - type: array - description: List of document `id`s that should be excluded from the search results. - items: - $ref: "#/components/schemas/SearchOverrideExclude" - filter_by: - type: string - description: > - A filter by clause that is applied to any search query that matches the override rule. - remove_matched_tokens: - type: boolean - description: > - Indicates whether search query tokens that exist in the override's rule should be removed from the search query. 
- SearchOverride: - allOf: - - $ref: "#/components/schemas/SearchOverrideSchema" - - type: object - required: - - id - properties: - id: - type: string - readOnly: true - SearchOverrideRule: - type: object - required: - - query - - match - properties: - query: - type: string - description: Indicates what search queries should be overridden - match: - type: string - description: > - Indicates whether the match on the query term should be `exact` or `contains`. - If we want to match all queries that contained - the word `apple`, we will use the `contains` match instead. - enum: - - exact - - contains - SearchOverrideInclude: - type: object - required: - - id - - position - properties: - id: - type: string - description: document id that should be included - position: - type: integer - description: position number where document should be included in the search results - SearchOverrideExclude: - type: object - required: - - id - properties: - id: - type: string - description: document id that should be excluded from the search results. - SearchOverridesResponse: - type: object - required: - - overrides - properties: - overrides: - type: array - x-go-type: "[]*SearchOverride" - items: - $ref: "#/components/schemas/SearchOverride" - SearchSynonymSchema: - type: object - required: - - synonyms - properties: - root: - type: string - description: For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. - synonyms: - type: array - description: Array of words that should be considered as synonyms. - items: - type: string - SearchSynonym: - allOf: - - $ref: "#/components/schemas/SearchSynonymSchema" - - type: object - required: - - id - properties: - id: - type: string - readOnly: true - SearchSynonymsResponse: - type: object - required: - - synonyms - properties: - synonyms: - type: array - x-go-type: "[]*SearchSynonym" - items: - $ref: "#/components/schemas/SearchSynonym" - HealthStatus: - type: object - required: - - ok - properties: - ok: - type: boolean - SuccessStatus: - type: object - required: - - success - properties: - success: - type: boolean - ApiResponse: - type: object - required: - - message - properties: - message: - type: string - ApiKeySchema: - type: object - required: - - actions - - collections - - description - properties: - value: - type: string - description: - type: string - actions: - type: array - items: - type: string - collections: - type: array - items: - type: string - expires_at: - type: integer - format: int64 - ApiKey: - allOf: - - $ref: "#/components/schemas/ApiKeySchema" - - type: object - properties: - id: - type: integer - format: int64 - readOnly: true - value_prefix: - type: string - readOnly: true - ApiKeysResponse: - type: object - required: - - keys - properties: - keys: - type: array - x-go-type: "[]*ApiKey" - items: - $ref: "#/components/schemas/ApiKey" - ScopedKeyParameters: - type: object - properties: - filter_by: - type: string - expires_at: - type: integer - format: int64 - SnapshotParameters: - type: object - properties: - snapshot_path: - type: string - ErrorResponse: - type: object - properties: - message: - type: string - MultiSearchResult: - type: object - required: - - results - properties: - results: - type: array - items: - $ref: "#/components/schemas/SearchResult" - SearchParameters: - type: object - required: - - q - - query_by - - properties: - q: - description: The query text to search for in the collection. - Use * as the search string to return all documents. 
- This is typically useful when used in conjunction with filter_by. - type: string - - query_by: - description: A list of `string` fields that should be queried - against. Multiple fields are separated with a comma. - type: string - - query_by_weights: - description: - The relative weight to give each `query_by` field when ranking results. - This can be used to boost fields in priority, when looking for matches. - Multiple fields are separated with a comma. - type: string - - text_match_type: - description: - In a multi-field matching context, this parameter determines how the representative text match - score of a record is calculated. Possible values are max_score (default) or max_weight. - type: string - - prefix: - description: - Boolean field to indicate that the last word in the query should - be treated as a prefix, and not as a whole word. This is used for building - autocomplete and instant search interfaces. Defaults to true. - type: string - - infix: - description: - If infix index is enabled for this field, infix searching can be done on a per-field - basis by sending a comma separated string parameter called infix to the search query. - This parameter can have 3 values; `off` infix search is disabled, which is default - `always` infix search is performed along with regular search - `fallback` infix search is performed if regular search does not produce results - type: string - - max_extra_prefix: - description: - There are also 2 parameters that allow you to control the extent of infix searching - max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before - or after the query that can be present in the token. For example query "K2100" has 2 extra - symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - type: integer - - max_extra_suffix: - description: - There are also 2 parameters that allow you to control the extent of infix searching - max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before - or after the query that can be present in the token. For example query "K2100" has 2 extra - symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - type: integer - - filter_by: - description: - Filter conditions for refining youropen api validator search results. Separate - multiple conditions with &&. - type: string - example: "num_employees:>100 && country: [USA, UK]" - - sort_by: - description: - A list of numerical fields and their corresponding sort orders - that will be used for ordering your results. - Up to 3 sort fields can be specified. - The text similarity score is exposed as a special `_text_match` field that - you can use in the list of sorting fields. - If no `sort_by` parameter is specified, results are sorted by - `_text_match:desc,default_sorting_field:desc` - type: string - example: num_employees:desc - - facet_by: - description: - A list of fields that will be used for faceting your results - on. Separate multiple fields with a comma. - type: string - - max_facet_values: - description: Maximum number of facet values to be returned. - type: integer - - facet_query: - description: - Facet values that are returned can now be filtered via this parameter. - The matching facet text is also highlighted. For example, when faceting - by `category`, you can set `facet_query=category:shoe` to return only - facet values that contain the prefix "shoe". 
- type: string - - num_typos: - description: > - The number of typographical errors (1 or 2) that would be tolerated. - Default: 2 - type: string - - page: - description: Results from this specific page number would be fetched. - type: integer - - per_page: - description: "Number of results to fetch per page. Default: 10" - type: integer - - limit: - description: > - Number of hits to fetch. Can be used as an alternative to the per_page parameter. - Default: 10. - type: integer - - offset: - description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - type: integer - - group_by: - description: - You can aggregate search results into groups or buckets by specify - one or more `group_by` fields. Separate multiple fields with a comma. - To group on a particular field, it must be a faceted field. - type: string - - group_limit: - description: > - Maximum number of hits to be returned for every group. If the `group_limit` is - set as `K` then only the top K hits in each group are returned in the response. - Default: 3 - type: integer - - include_fields: - description: List of fields from the document to include in the search result - type: string - - exclude_fields: - description: List of fields from the document to exclude in the search result - type: string - - highlight_full_fields: - description: List of fields which should be highlighted fully without snippeting - type: string - - highlight_affix_num_tokens: - description: > - The number of tokens that should surround the highlighted text on each side. - Default: 4 - type: integer - - highlight_start_tag: - description: > - The start tag used for the highlighted snippets. - Default: `` - type: string - highlight_end_tag: - description: > - The end tag used for the highlighted snippets. - Default: `` - type: string - - enable_highlight_v1: - description: > - Flag for enabling/disabling the deprecated, old highlight structure in the response. - Default: true - type: boolean - default: true - - snippet_threshold: - description: > - Field values under this length will be fully highlighted, instead of showing - a snippet of relevant portion. Default: 30 - type: integer - - drop_tokens_threshold: - description: > - If the number of results found for a specific query is less than - this number, Typesense will attempt to drop the tokens in the query until - enough results are found. Tokens that have the least individual hits - are dropped first. Set to 0 to disable. Default: 10 - type: integer - typo_tokens_threshold: - description: > - If the number of results found for a specific query is less than this number, - Typesense will attempt to look for tokens with more typos until - enough results are found. Default: 100 - type: integer - - pinned_hits: - description: > - A list of records to unconditionally include in the search results - at specific positions. An example use case would be to feature or promote - certain items on the top of search results. - A list of `record_id:hit_position`. Eg: to include a record with ID 123 - at Position 1 and another record with ID 456 at Position 5, - you'd specify `123:1,456:5`. - - You could also use the Overrides feature to override search results based - on rules. Overrides are applied first, followed by `pinned_hits` and - finally `hidden_hits`. - type: string - - hidden_hits: - description: > - A list of records to unconditionally hide from search results. - A list of `record_id`s to hide. 
Eg: to hide records with IDs 123 and 456, - you'd specify `123,456`. - - You could also use the Overrides feature to override search results based - on rules. Overrides are applied first, followed by `pinned_hits` and - finally `hidden_hits`. - type: string - - highlight_fields: - description: > - A list of custom fields that must be highlighted even if you don't query - for them - type: string - - split_join_tokens: - description: > - Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. - Splitting/joining of tokens will only be attempted if the original query produces no results. - To always trigger this behavior, set value to `always``. - To disable, set value to `off`. Default is `fallback`. - type: string - - pre_segmented_query: - description: > - You can index content from any logographic language into Typesense if you - are able to segment / split the text into space-separated words yourself - before indexing and querying. - - Set this parameter to true to do the same - type: boolean - - preset: - description: > - Search using a bunch of search parameters by setting this parameter to - the name of the existing Preset. - type: string - - enable_overrides: - description: > - If you have some overrides defined but want to disable all of them during - query time, you can do that by setting this parameter to false - type: boolean - - prioritize_exact_match: - description: > - Set this parameter to true to ensure that an exact match is ranked above - the others - type: boolean - max_candidates: - description: > - Control the number of words that Typesense considers for typo and prefix searching. - type: integer - prioritize_token_position: - description: > - Make Typesense prioritize documents where the query words appear earlier in the text. - type: boolean - exhaustive_search: - description: > - Setting this to true will make Typesense consider all prefixes and typo - corrections of the words in the query without stopping early when enough results are found - (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - type: boolean - search_cutoff_ms: - description: > - Typesense will attempt to return results early if the cutoff time has elapsed. - This is not a strict guarantee and facet computation is not bound by this parameter. - type: integer - use_cache: - description: > - Enable server side caching of search query results. By default, caching is disabled. - type: boolean - cache_ttl: - description: > - The duration (in seconds) that determines how long the search query is cached. - This value can be set on a per-query basis. Default: 60. - type: integer - min_len_1typo: - description: > - Minimum word length for 1-typo correction to be applied. - The value of num_typos is still treated as the maximum allowed typos. - type: integer - min_len_2typo: - description: > - Minimum word length for 2-typo correction to be applied. - The value of num_typos is still treated as the maximum allowed typos. - type: integer - vector_query: - description: > - Vector query expression for fetching documents "closest" to a given query/document vector. - type: string - remote_embedding_timeout_ms: - description: > - Timeout (in milliseconds) for fetching remote embeddings. - type: integer - remote_embedding_num_tries: - description: > - Number of times to retry fetching remote embeddings. - type: integer - - MultiSearchParameters: - description: > - Parameters for the multi search API. 
- type: object - properties: - q: - description: The query text to search for in the collection. - Use * as the search string to return all documents. - This is typically useful when used in conjunction with filter_by. - type: string - - query_by: - description: A list of `string` fields that should be queried - against. Multiple fields are separated with a comma. - type: string - - query_by_weights: - description: - The relative weight to give each `query_by` field when ranking results. - This can be used to boost fields in priority, when looking for matches. - Multiple fields are separated with a comma. - type: string - - text_match_type: - description: - In a multi-field matching context, this parameter determines how the representative text match - score of a record is calculated. Possible values are max_score (default) or max_weight. - type: string - - prefix: - description: - Boolean field to indicate that the last word in the query should - be treated as a prefix, and not as a whole word. This is used for building - autocomplete and instant search interfaces. Defaults to true. - type: string - - infix: - description: - If infix index is enabled for this field, infix searching can be done on a per-field - basis by sending a comma separated string parameter called infix to the search query. - This parameter can have 3 values; `off` infix search is disabled, which is default - `always` infix search is performed along with regular search - `fallback` infix search is performed if regular search does not produce results - type: string - - max_extra_prefix: - description: - There are also 2 parameters that allow you to control the extent of infix searching - max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before - or after the query that can be present in the token. For example query "K2100" has 2 extra - symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - type: integer - - max_extra_suffix: - description: - There are also 2 parameters that allow you to control the extent of infix searching - max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before - or after the query that can be present in the token. For example query "K2100" has 2 extra - symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - type: integer - - filter_by: - description: - Filter conditions for refining youropen api validator search results. Separate - multiple conditions with &&. - type: string - example: "num_employees:>100 && country: [USA, UK]" - - sort_by: - description: - A list of numerical fields and their corresponding sort orders - that will be used for ordering your results. - Up to 3 sort fields can be specified. - The text similarity score is exposed as a special `_text_match` field that - you can use in the list of sorting fields. - If no `sort_by` parameter is specified, results are sorted by - `_text_match:desc,default_sorting_field:desc` - type: string - - facet_by: - description: - A list of fields that will be used for faceting your results - on. Separate multiple fields with a comma. - type: string - - max_facet_values: - description: Maximum number of facet values to be returned. - type: integer - - facet_query: - description: - Facet values that are returned can now be filtered via this parameter. - The matching facet text is also highlighted. 
For example, when faceting - by `category`, you can set `facet_query=category:shoe` to return only - facet values that contain the prefix "shoe". - type: string - - num_typos: - description: > - The number of typographical errors (1 or 2) that would be tolerated. - Default: 2 - type: string - - page: - description: Results from this specific page number would be fetched. - type: integer - - per_page: - description: "Number of results to fetch per page. Default: 10" - type: integer - - limit: - description: > - Number of hits to fetch. Can be used as an alternative to the per_page parameter. - Default: 10. - type: integer - - offset: - description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - type: integer - - group_by: - description: - You can aggregate search results into groups or buckets by specify - one or more `group_by` fields. Separate multiple fields with a comma. - To group on a particular field, it must be a faceted field. - type: string - - group_limit: - description: > - Maximum number of hits to be returned for every group. If the `group_limit` is - set as `K` then only the top K hits in each group are returned in the response. - Default: 3 - type: integer - - include_fields: - description: List of fields from the document to include in the search result - type: string - - exclude_fields: - description: List of fields from the document to exclude in the search result - type: string - - highlight_full_fields: - description: List of fields which should be highlighted fully without snippeting - type: string - - highlight_affix_num_tokens: - description: > - The number of tokens that should surround the highlighted text on each side. - Default: 4 - type: integer - - highlight_start_tag: - description: > - The start tag used for the highlighted snippets. - Default: `` - type: string - highlight_end_tag: - description: > - The end tag used for the highlighted snippets. - Default: `` - type: string - - snippet_threshold: - description: > - Field values under this length will be fully highlighted, instead of showing - a snippet of relevant portion. Default: 30 - type: integer - - drop_tokens_threshold: - description: > - If the number of results found for a specific query is less than - this number, Typesense will attempt to drop the tokens in the query until - enough results are found. Tokens that have the least individual hits - are dropped first. Set to 0 to disable. Default: 10 - type: integer - typo_tokens_threshold: - description: > - If the number of results found for a specific query is less than this number, - Typesense will attempt to look for tokens with more typos until - enough results are found. Default: 100 - type: integer - - pinned_hits: - description: > - A list of records to unconditionally include in the search results - at specific positions. An example use case would be to feature or promote - certain items on the top of search results. - A list of `record_id:hit_position`. Eg: to include a record with ID 123 - at Position 1 and another record with ID 456 at Position 5, - you'd specify `123:1,456:5`. - - You could also use the Overrides feature to override search results based - on rules. Overrides are applied first, followed by `pinned_hits` and - finally `hidden_hits`. - type: string - - hidden_hits: - description: > - A list of records to unconditionally hide from search results. - A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, - you'd specify `123,456`. 
- - You could also use the Overrides feature to override search results based - on rules. Overrides are applied first, followed by `pinned_hits` and - finally `hidden_hits`. - type: string - - highlight_fields: - description: > - A list of custom fields that must be highlighted even if you don't query - for them - type: string - - pre_segmented_query: - description: > - You can index content from any logographic language into Typesense if you - are able to segment / split the text into space-separated words yourself - before indexing and querying. - - Set this parameter to true to do the same - type: boolean - - preset: - description: > - Search using a bunch of search parameters by setting this parameter to - the name of the existing Preset. - type: string - - enable_overrides: - description: > - If you have some overrides defined but want to disable all of them during - query time, you can do that by setting this parameter to false - type: boolean - - prioritize_exact_match: - description: > - Set this parameter to true to ensure that an exact match is ranked above - the others - type: boolean - exhaustive_search: - description: > - Setting this to true will make Typesense consider all prefixes and typo - corrections of the words in the query without stopping early when enough results are found - (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - type: boolean - search_cutoff_ms: - description: > - Typesense will attempt to return results early if the cutoff time has elapsed. - This is not a strict guarantee and facet computation is not bound by this parameter. - type: integer - use_cache: - description: > - Enable server side caching of search query results. By default, caching is disabled. - type: boolean - cache_ttl: - description: > - The duration (in seconds) that determines how long the search query is cached. - This value can be set on a per-query basis. Default: 60. - type: integer - min_len_1typo: - description: > - Minimum word length for 1-typo correction to be applied. - The value of num_typos is still treated as the maximum allowed typos. - type: integer - min_len_2typo: - description: > - Minimum word length for 2-typo correction to be applied. - The value of num_typos is still treated as the maximum allowed typos. - type: integer - vector_query: - description: > - Vector query expression for fetching documents "closest" to a given query/document vector. - type: string - remote_embedding_timeout_ms: - description: > - Timeout (in milliseconds) for fetching remote embeddings. - type: integer - remote_embedding_num_tries: - description: > - Number of times to retry fetching remote embeddings. - type: integer - MultiSearchSearchesParameter: - type: object - required: - - searches - properties: - searches: - type: array - items: - $ref: "#/components/schemas/MultiSearchCollectionParameters" - MultiSearchCollectionParameters: - allOf: - - $ref: "#/components/schemas/MultiSearchParameters" - - type: object - required: - - collection - properties: - collection: - type: string - description: > - The collection to search in. 
- FacetCounts: - type: object - properties: - counts: - type: array - items: - type: object - properties: - count: - type: integer - highlighted: - type: string - value: - type: string - field_name: - type: string - stats: - type: object - properties: - max: - type: number - format: double - min: - type: number - format: double - sum: - type: number - format: double - total_values: - type: integer - avg: - type: number - format: double - AnalyticsRuleSchema: - type: object - required: - - name - - type - - params - properties: - name: - type: string - type: - type: string - params: - $ref: "#/components/schemas/AnalyticsRuleParameters" - AnalyticsRuleParameters: - type: object - required: - - source - - destination - - limit - properties: - source: - type: object - properties: - collections: - type: array - items: - type: string - destination: - type: object - properties: - collection: - type: string - limit: - type: integer - AnalyticsRulesRetrieveSchema: - type: object - properties: - rules: - type: array - items: - $ref: "#/components/schemas/AnalyticsRuleSchema" - securitySchemes: - api_key_header: - type: apiKey - name: X-TYPESENSE-API-KEY - in: header diff --git a/typesense-go-unwrapped-api-spec.yaml b/typesense-go-unwrapped-api-spec.yaml new file mode 100644 index 0000000..c9b3c18 --- /dev/null +++ b/typesense-go-unwrapped-api-spec.yaml @@ -0,0 +1,3963 @@ +components: + schemas: + APIStatsResponse: + properties: + delete_latency_ms: + format: double + type: number + delete_requests_per_second: + format: double + type: number + import_latency_ms: + format: double + type: number + import_requests_per_second: + format: double + type: number + latency_ms: + type: object + x-go-type: map[string]float64 + overloaded_requests_per_second: + format: double + type: number + pending_write_batches: + format: double + type: number + requests_per_second: + type: object + x-go-type: map[string]float64 + search_latency_ms: + format: double + type: number + search_requests_per_second: + format: double + type: number + total_requests_per_second: + format: double + type: number + write_latency_ms: + format: double + type: number + write_requests_per_second: + format: double + type: number + type: object + AnalyticsEventCreateResponse: + properties: + ok: + type: boolean + required: + - ok + type: object + AnalyticsEventCreateSchema: + properties: + data: + type: object + name: + type: string + type: + type: string + required: + - type + - name + - data + type: object + AnalyticsRuleDeleteResponse: + properties: + name: + type: string + required: + - name + type: object + AnalyticsRuleParameters: + properties: + destination: + $ref: '#/components/schemas/AnalyticsRuleParametersDestination' + expand_query: + type: boolean + limit: + type: integer + source: + $ref: '#/components/schemas/AnalyticsRuleParametersSource' + required: + - source + - destination + type: object + AnalyticsRuleParametersDestination: + properties: + collection: + type: string + counter_field: + type: string + required: + - collection + type: object + AnalyticsRuleParametersSource: + properties: + collections: + items: + type: string + type: array + events: + items: + properties: + name: + type: string + type: + type: string + weight: + format: float + type: number + required: + - type + - weight + - name + type: object + type: array + required: + - collections + type: object + AnalyticsRuleSchema: + allOf: + - $ref: '#/components/schemas/AnalyticsRuleUpsertSchema' + - properties: + name: + type: string + required: + - name + type: 
object + AnalyticsRuleUpsertSchema: + properties: + params: + $ref: '#/components/schemas/AnalyticsRuleParameters' + type: + enum: + - popular_queries + - nohits_queries + - counter + type: string + required: + - type + - params + type: object + AnalyticsRulesRetrieveSchema: + properties: + rules: + items: + $ref: '#/components/schemas/AnalyticsRuleSchema' + type: array + x-go-type: '[]*AnalyticsRuleSchema' + type: object + ApiKey: + allOf: + - $ref: '#/components/schemas/ApiKeySchema' + - properties: + id: + format: int64 + readOnly: true + type: integer + value_prefix: + readOnly: true + type: string + type: object + ApiKeyDeleteResponse: + properties: + id: + description: The id of the API key that was deleted + format: int64 + type: integer + required: + - id + type: object + ApiKeySchema: + properties: + actions: + items: + type: string + type: array + collections: + items: + type: string + type: array + description: + type: string + expires_at: + format: int64 + type: integer + value: + type: string + required: + - actions + - collections + - description + type: object + ApiKeysResponse: + properties: + keys: + items: + $ref: '#/components/schemas/ApiKey' + type: array + x-go-type: '[]*ApiKey' + required: + - keys + type: object + ApiResponse: + properties: + message: + type: string + required: + - message + type: object + CollectionAlias: + properties: + collection_name: + description: Name of the collection the alias mapped to + type: string + name: + description: Name of the collection alias + readOnly: true + type: string + required: + - collection_name + - name + type: object + CollectionAliasSchema: + properties: + collection_name: + description: Name of the collection you wish to map the alias to + type: string + required: + - collection_name + type: object + CollectionAliasesResponse: + properties: + aliases: + items: + $ref: '#/components/schemas/CollectionAlias' + type: array + x-go-type: '[]*CollectionAlias' + required: + - aliases + type: object + CollectionResponse: + allOf: + - $ref: '#/components/schemas/CollectionSchema' + - properties: + created_at: + description: Timestamp of when the collection was created (Unix epoch in seconds) + format: int64 + readOnly: true + type: integer + num_documents: + description: Number of documents in the collection + format: int64 + readOnly: true + type: integer + required: + - num_documents + - created_at + type: object + CollectionSchema: + properties: + default_sorting_field: + default: "" + description: The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. + example: num_employees + type: string + enable_nested_fields: + default: false + description: Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. + example: true + type: boolean + fields: + description: A list of fields for querying, filtering and faceting + example: + - facet: false + name: num_employees + type: int32 + - facet: false + name: company_name + type: string + - facet: true + name: country + type: string + items: + $ref: '#/components/schemas/Field' + type: array + name: + description: Name of the collection + example: companies + type: string + symbols_to_index: + default: [] + description: | + List of symbols or special characters to be indexed. 
+ items: + maxLength: 1 + minLength: 1 + type: string + type: array + token_separators: + default: [] + description: | + List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. + items: + maxLength: 1 + minLength: 1 + type: string + type: array + voice_query_model: + $ref: '#/components/schemas/VoiceQueryModelCollectionConfig' + required: + - name + - fields + type: object + CollectionUpdateSchema: + properties: + fields: + description: A list of fields for querying, filtering and faceting + example: + - facet: false + name: company_name + type: string + - facet: false + name: num_employees + type: int32 + - facet: true + name: country + type: string + items: + $ref: '#/components/schemas/Field' + type: array + required: + - fields + type: object + ConversationModelCreateSchema: + allOf: + - $ref: '#/components/schemas/ConversationModelUpdateSchema' + - properties: + history_collection: + description: Typesense collection that stores the historical conversations + type: string + max_bytes: + description: | + The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. + type: integer + model_name: + description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM + type: string + required: + - model_name + - max_bytes + - history_collection + type: object + required: + - model_name + - max_bytes + ConversationModelSchema: + allOf: + - $ref: '#/components/schemas/ConversationModelCreateSchema' + - properties: + id: + description: An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. + type: string + required: + - id + type: object + ConversationModelUpdateSchema: + properties: + account_id: + description: LLM service's account ID (only applicable for Cloudflare) + type: string + api_key: + description: The LLM service's API Key + type: string + history_collection: + description: Typesense collection that stores the historical conversations + type: string + id: + description: An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. + type: string + max_bytes: + description: | + The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. + type: integer + model_name: + description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM + type: string + system_prompt: + description: The system prompt that contains special instructions to the LLM + type: string + ttl: + description: | + Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) + type: integer + vllm_url: + description: URL of vLLM service + type: string + type: object + DirtyValues: + enum: + - coerce_or_reject + - coerce_or_drop + - drop + - reject + type: string + DocumentIndexParameters: + properties: + dirty_values: + $ref: '#/components/schemas/DirtyValues' + type: object + DropTokensMode: + description: | + Dictates the direction in which the words in the query must be dropped when the original words in the query do not appear in any document. Values: right_to_left (default), left_to_right, both_sides:3 A note on both_sides:3 - for queries upto 3 tokens (words) in length, this mode will drop tokens from both sides and exhaustively rank all matching results. 
If query length is greater than 3 words, Typesense will just fallback to default behavior of right_to_left + enum: + - right_to_left + - left_to_right + - both_sides:3 + type: string + ErrorResponse: + properties: + message: + type: string + type: object + FacetCounts: + properties: + counts: + items: + properties: + count: + type: integer + highlighted: + type: string + parent: + type: object + value: + type: string + type: object + type: array + field_name: + type: string + stats: + properties: + avg: + format: double + type: number + max: + format: double + type: number + min: + format: double + type: number + sum: + format: double + type: number + total_values: + type: integer + type: object + type: object + Field: + properties: + drop: + example: true + type: boolean + embed: + properties: + from: + items: + type: string + type: array + model_config: + properties: + access_token: + type: string + api_key: + type: string + client_id: + type: string + client_secret: + type: string + indexing_prefix: + type: string + model_name: + type: string + project_id: + type: string + query_prefix: + type: string + refresh_token: + type: string + url: + type: string + required: + - model_name + type: object + required: + - from + - model_config + type: object + facet: + example: false + type: boolean + index: + default: true + example: true + type: boolean + infix: + default: false + example: true + type: boolean + locale: + example: el + type: string + name: + example: company_name + type: string + num_dim: + example: 256 + type: integer + optional: + example: true + type: boolean + range_index: + description: | + Enables an index optimized for range filtering on numerical fields (e.g. rating:>3.5). Default: false. + type: boolean + reference: + description: | + Name of a field in another collection that should be linked to this collection so that it can be joined during query. + type: string + sort: + example: true + type: boolean + stem: + description: | + Values are stemmed before indexing in-memory. Default: false. + type: boolean + stem_dictionary: + description: Name of the stemming dictionary to use for this field + example: irregular-plurals + type: string + store: + description: | + When set to false, the field value will not be stored on disk. Default: true. + type: boolean + symbols_to_index: + default: [] + description: | + List of symbols or special characters to be indexed. + items: + maxLength: 1 + minLength: 1 + type: string + type: array + token_separators: + default: [] + description: | + List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. + items: + maxLength: 1 + minLength: 1 + type: string + type: array + type: + example: string + type: string + vec_dist: + description: | + The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. + type: string + required: + - name + - type + type: object + HealthStatus: + properties: + ok: + type: boolean + required: + - ok + type: object + IndexAction: + enum: + - create + - update + - upsert + - emplace + type: string + MultiSearchCollectionParameters: + allOf: + - $ref: '#/components/schemas/MultiSearchParameters' + - properties: + collection: + description: | + The collection to search in. + type: string + rerank_hybrid_matches: + default: false + description: | + When true, computes both text match and vector distance scores for all matches in hybrid search. 
Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. + type: boolean + x-typesense-api-key: + description: A separate search API key for each search within a multi_search request + type: string + type: object + MultiSearchParameters: + description: | + Parameters for the multi search API. + properties: + cache_ttl: + description: | + The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + type: integer + conversation: + description: | + Enable conversational search. + type: boolean + conversation_id: + description: | + The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + type: string + conversation_model_id: + description: | + The Id of Conversation Model to be used. + type: string + drop_tokens_mode: + $ref: '#/components/schemas/DropTokensMode' + drop_tokens_threshold: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + type: integer + enable_overrides: + default: false + description: | + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + type: boolean + enable_synonyms: + description: | + If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + type: boolean + enable_typos_for_alpha_numerical_tokens: + description: | + Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + type: boolean + enable_typos_for_numerical_tokens: + default: true + description: | + Make Typesense disable typos for numerical tokens. + type: boolean + exclude_fields: + description: List of fields from the document to exclude in the search result + type: string + exhaustive_search: + description: | + Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + type: boolean + facet_by: + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + type: string + facet_query: + description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". + type: string + facet_return_parent: + description: | + Comma separated string of nested facet fields whose parent object should be returned in facet response. + type: string + facet_strategy: + description: | + Choose the underlying faceting strategy used. Comma separated string of allowed values: exhaustive, top_values or automatic (default). + type: string + filter_by: + description: Filter conditions for refining your search results. Separate multiple conditions with &&.
+ example: 'num_employees:>100 && country: [USA, UK]' + type: string + filter_curated_hits: + description: | + Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + type: boolean + group_by: + description: You can aggregate search results into groups or buckets by specifying one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + type: string + group_limit: + description: | + Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + type: integer + group_missing_values: + description: | + Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + type: boolean + hidden_hits: + description: | + A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + highlight_affix_num_tokens: + description: | + The number of tokens that should surround the highlighted text on each side. Default: 4 + type: integer + highlight_end_tag: + description: | + The end tag used for the highlighted snippets. Default: `` + type: string + highlight_fields: + description: | + A list of custom fields that must be highlighted even if you don't query for them + type: string + highlight_full_fields: + description: List of fields which should be highlighted fully without snippeting + type: string + highlight_start_tag: + description: | + The start tag used for the highlighted snippets. Default: `` + type: string + include_fields: + description: List of fields from the document to include in the search result + type: string + infix: + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results + type: string + limit: + description: | + Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + type: integer + max_extra_prefix: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + max_extra_suffix: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100".
By default, any number of prefixes/suffixes can be present for a match. + type: integer + max_facet_values: + description: Maximum number of facet values to be returned. + type: integer + min_len_1typo: + description: | + Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + min_len_2typo: + description: | + Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + num_typos: + description: | + The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + type: string + offset: + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + type: integer + override_tags: + description: Comma separated list of tags to trigger the curation rules that match the tags. + type: string + page: + description: Results from this specific page number would be fetched. + type: integer + per_page: + description: 'Number of results to fetch per page. Default: 10' + type: integer + pinned_hits: + description: | + A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + pre_segmented_query: + default: false + description: | + You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. + Set this parameter to true to do the same + type: boolean + prefix: + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + type: string + preset: + description: | + Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + type: string + prioritize_exact_match: + default: true + description: | + Set this parameter to true to ensure that an exact match is ranked above the others + type: boolean + prioritize_num_matching_fields: + default: true + description: | + Make Typesense prioritize documents where the query words appear in a greater number of fields. + type: boolean + prioritize_token_position: + default: false + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. + type: boolean + q: + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + type: string + query_by: + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + type: string + query_by_weights: + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma.
+ type: string + remote_embedding_num_tries: + description: | + Number of times to retry fetching remote embeddings. + type: integer + remote_embedding_timeout_ms: + description: | + Timeout (in milliseconds) for fetching remote embeddings. + type: integer + search_cutoff_ms: + description: | + Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. + type: integer + snippet_threshold: + description: | + Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + type: integer + sort_by: + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + type: string + stopwords: + description: | + Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + type: string + synonym_num_typos: + description: | + Allow synonym resolution on typo-corrected words in the query. Default: 0 + type: integer + synonym_prefix: + description: | + Allow synonym resolution on word prefixes in the query. Default: false + type: boolean + text_match_type: + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + type: string + typo_tokens_threshold: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + type: integer + use_cache: + description: | + Enable server side caching of search query results. By default, caching is disabled. + type: boolean + vector_query: + description: | + Vector query expression for fetching documents "closest" to a given query/document vector. + type: string + voice_query: + description: | + The base64 encoded audio file in 16 khz 16-bit WAV format. + type: string + type: object + MultiSearchResult: + properties: + conversation: + $ref: '#/components/schemas/SearchResultConversation' + results: + items: + $ref: '#/components/schemas/MultiSearchResultItem' + type: array + required: + - results + type: object + MultiSearchResultItem: + allOf: + - $ref: '#/components/schemas/SearchResult' + - properties: + code: + description: HTTP error code + format: int64 + type: integer + error: + description: Error description + type: string + type: object + MultiSearchSearchesParameter: + properties: + searches: + items: + $ref: '#/components/schemas/MultiSearchCollectionParameters' + type: array + union: + description: When true, merges the search results from each search query into a single ordered set of hits. 
+ type: boolean + required: + - searches + type: object + PresetDeleteSchema: + properties: + name: + type: string + required: + - name + type: object + PresetSchema: + allOf: + - $ref: '#/components/schemas/PresetUpsertSchema' + - properties: + name: + type: string + required: + - name + type: object + PresetUpsertSchema: + properties: + value: + oneOf: + - $ref: '#/components/schemas/SearchParameters' + - $ref: '#/components/schemas/MultiSearchSearchesParameter' + required: + - value + PresetsRetrieveSchema: + properties: + presets: + items: + $ref: '#/components/schemas/PresetSchema' + type: array + x-go-type: '[]*PresetSchema' + required: + - presets + type: object + SchemaChangeStatus: + properties: + altered_docs: + description: Number of documents that have been altered + type: integer + collection: + description: Name of the collection being modified + type: string + validated_docs: + description: Number of documents that have been validated + type: integer + type: object + ScopedKeyParameters: + properties: + expires_at: + format: int64 + type: integer + filter_by: + type: string + type: object + SearchGroupedHit: + properties: + found: + type: integer + group_key: + items: {} + type: array + hits: + description: The documents that matched the search query + items: + $ref: '#/components/schemas/SearchResultHit' + type: array + required: + - group_key + - hits + type: object + SearchHighlight: + properties: + field: + example: company_name + type: string + indices: + description: The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field + example: 1 + items: + type: integer + type: array + matched_tokens: + items: + type: object + x-go-type: interface{} + type: array + snippet: + description: Present only for (non-array) string fields + example: Stark Industries + type: string + snippets: + description: Present only for (array) string[] fields + example: + - Stark Industries + - Stark Corp + items: + type: string + type: array + value: + description: Full field value with highlighting, present only for (non-array) string fields + example: Stark Industries is a major supplier of space equipment. + type: string + values: + description: Full field value with highlighting, present only for (array) string[] fields + example: + - Stark Industries + - Stark Corp + items: + type: string + type: array + type: object + SearchOverride: + allOf: + - $ref: '#/components/schemas/SearchOverrideSchema' + - properties: + id: + readOnly: true + type: string + required: + - id + type: object + SearchOverrideDeleteResponse: + properties: + id: + description: The id of the override that was deleted + type: string + required: + - id + type: object + SearchOverrideExclude: + properties: + id: + description: document id that should be excluded from the search results. + type: string + required: + - id + type: object + SearchOverrideInclude: + properties: + id: + description: document id that should be included + type: string + position: + description: position number where document should be included in the search results + type: integer + required: + - id + - position + type: object + SearchOverrideRule: + properties: + filter_by: + description: | + Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). 
+ type: string + match: + description: | + Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. + enum: + - exact + - contains + type: string + query: + description: Indicates what search queries should be overridden + type: string + tags: + description: List of tag values to associate with this override rule. + items: + type: string + type: array + type: object + SearchOverrideSchema: + properties: + effective_from_ts: + description: | + A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. + type: integer + effective_to_ts: + description: | + A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. + type: integer + excludes: + description: List of document `id`s that should be excluded from the search results. + items: + $ref: '#/components/schemas/SearchOverrideExclude' + type: array + filter_by: + description: | + A filter by clause that is applied to any search query that matches the override rule. + type: string + filter_curated_hits: + description: | + When set to true, the filter conditions of the query are applied to the curated records as well. Default: false. + type: boolean + includes: + description: List of document `id`s that should be included in the search results with their corresponding `position`s. + items: + $ref: '#/components/schemas/SearchOverrideInclude' + type: array + metadata: + description: | + Return a custom JSON object in the Search API response, when this rule is triggered. This can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. + type: object + remove_matched_tokens: + description: | + Indicates whether search query tokens that exist in the override's rule should be removed from the search query. + type: boolean + replace_query: + description: | + Replaces the current search query with this value, when the search query matches the override rule. + type: string + rule: + $ref: '#/components/schemas/SearchOverrideRule' + sort_by: + description: | + A sort by clause that is applied to any search query that matches the override rule. + type: string + stop_processing: + description: | + When set to true, override processing will stop at the first matching rule. When set to false, override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. + type: boolean + required: + - rule + type: object + SearchOverridesResponse: + properties: + overrides: + items: + $ref: '#/components/schemas/SearchOverride' + type: array + x-go-type: '[]*SearchOverride' + required: + - overrides + type: object + SearchParameters: + properties: + cache_ttl: + description: | + The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + type: integer + conversation: + description: | + Enable conversational search. + type: boolean + conversation_id: + description: | + The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM.
+ type: string + conversation_model_id: + description: | + The Id of Conversation Model to be used. + type: string + drop_tokens_mode: + $ref: '#/components/schemas/DropTokensMode' + drop_tokens_threshold: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + type: integer + enable_highlight_v1: + default: true + description: | + Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true + type: boolean + enable_overrides: + default: false + description: | + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + type: boolean + enable_synonyms: + description: | + If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + type: boolean + enable_typos_for_alpha_numerical_tokens: + description: | + Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + type: boolean + enable_typos_for_numerical_tokens: + default: true + description: | + Make Typesense disable typos for numerical tokens. + type: boolean + exclude_fields: + description: List of fields from the document to exclude in the search result + type: string + exhaustive_search: + description: | + Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + type: boolean + facet_by: + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + type: string + facet_query: + description: Facet values that are returned can be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". + type: string + facet_return_parent: + description: | + Comma separated string of nested facet fields whose parent object should be returned in facet response. + type: string + facet_strategy: + description: | + Choose the underlying faceting strategy used. Comma separated string of allowed values: exhaustive, top_values or automatic (default). + type: string + filter_by: + description: Filter conditions for refining your search results. Separate multiple conditions with &&. + example: 'num_employees:>100 && country: [USA, UK]' + type: string + filter_curated_hits: + description: | + Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + type: boolean + group_by: + description: You can aggregate search results into groups or buckets by specifying one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + type: string + group_limit: + description: | + Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response.
Default: 3 + type: integer + group_missing_values: + description: | + Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + type: boolean + hidden_hits: + description: | + A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + highlight_affix_num_tokens: + description: | + The number of tokens that should surround the highlighted text on each side. Default: 4 + type: integer + highlight_end_tag: + description: | + The end tag used for the highlighted snippets. Default: `` + type: string + highlight_fields: + description: | + A list of custom fields that must be highlighted even if you don't query for them + type: string + highlight_full_fields: + description: List of fields which should be highlighted fully without snippeting + type: string + highlight_start_tag: + description: | + The start tag used for the highlighted snippets. Default: `` + type: string + include_fields: + description: List of fields from the document to include in the search result + type: string + infix: + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results + type: string + limit: + description: | + Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + type: integer + max_candidates: + description: | + Control the number of words that Typesense considers for typo and prefix searching. + type: integer + max_extra_prefix: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + max_extra_suffix: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + max_facet_values: + description: Maximum number of facet values to be returned. + type: integer + max_filter_by_candidates: + description: Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. + type: integer + min_len_1typo: + description: | + Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. 
+ type: integer + min_len_2typo: + description: | + Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + num_typos: + description: | + The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + type: string + offset: + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + type: integer + override_tags: + description: Comma separated list of tags to trigger the curations rules that match the tags. + type: string + page: + description: Results from this specific page number would be fetched. + type: integer + per_page: + description: 'Number of results to fetch per page. Default: 10' + type: integer + pinned_hits: + description: | + A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + pre_segmented_query: + description: | + You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. + Set this parameter to true to do the same + type: boolean + prefix: + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + type: string + preset: + description: | + Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + type: string + prioritize_exact_match: + default: true + description: | + Set this parameter to true to ensure that an exact match is ranked above the others + type: boolean + prioritize_num_matching_fields: + default: true + description: | + Make Typesense prioritize documents where the query words appear in more number of fields. + type: boolean + prioritize_token_position: + default: false + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. + type: boolean + q: + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + type: string + query_by: + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + type: string + query_by_weights: + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + type: string + remote_embedding_num_tries: + description: | + Number of times to retry fetching remote embeddings. + type: integer + remote_embedding_timeout_ms: + description: | + Timeout (in milliseconds) for fetching remote embeddings. + type: integer + search_cutoff_ms: + description: | + Typesense will attempt to return results early if the cutoff time has elapsed. 
This is not a strict guarantee and facet computation is not bound by this parameter. + type: integer + snippet_threshold: + description: | + Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + type: integer + sort_by: + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + example: num_employees:desc + type: string + split_join_tokens: + description: | + Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. + type: string + stopwords: + description: | + Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + type: string + synonym_num_typos: + description: | + Allow synonym resolution on typo-corrected words in the query. Default: 0 + type: integer + synonym_prefix: + description: | + Allow synonym resolution on word prefixes in the query. Default: false + type: boolean + text_match_type: + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + type: string + typo_tokens_threshold: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + type: integer + use_cache: + description: | + Enable server side caching of search query results. By default, caching is disabled. + type: boolean + vector_query: + description: | + Vector query expression for fetching documents "closest" to a given query/document vector. + type: string + voice_query: + description: | + The base64 encoded audio file in 16 khz 16-bit WAV format. 
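+        # Illustrative note (not part of the upstream spec): the parameters above are accepted by the
+        # documents/search and multi_search endpoints defined under `paths` below. A minimal example,
+        # assuming a local server on Typesense's default port and a hypothetical "companies" collection:
+        #   curl -H "X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}" \
+        #     "http://localhost:8108/collections/companies/documents/search?q=stark&query_by=company_name&sort_by=num_employees:desc&per_page=10"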
+ type: string + type: object + SearchResult: + properties: + conversation: + $ref: '#/components/schemas/SearchResultConversation' + facet_counts: + items: + $ref: '#/components/schemas/FacetCounts' + type: array + found: + description: The number of documents found + type: integer + found_docs: + type: integer + grouped_hits: + items: + $ref: '#/components/schemas/SearchGroupedHit' + type: array + hits: + description: The documents that matched the search query + items: + $ref: '#/components/schemas/SearchResultHit' + type: array + out_of: + description: The total number of documents in the collection + type: integer + page: + description: The search result page number + type: integer + request_params: + properties: + collection_name: + type: string + per_page: + type: integer + q: + type: string + voice_query: + properties: + transcribed_query: + type: string + type: object + required: + - collection_name + - q + - per_page + type: object + search_cutoff: + description: Whether the search was cut off + type: boolean + search_time_ms: + description: The number of milliseconds the search took + type: integer + type: object + SearchResultConversation: + properties: + answer: + type: string + conversation_history: + items: + type: object + type: array + conversation_id: + type: string + query: + type: string + required: + - answer + - conversation_history + - conversation_id + - query + type: object + SearchResultHit: + example: + document: + company_name: Stark Industries + country: USA + id: "124" + num_employees: 5215 + highlights: + company_name: + field: company_name + snippet: Stark Industries + text_match: 1234556 + properties: + document: + description: Can be any key-value pair + type: object + geo_distance_meters: + additionalProperties: + type: integer + description: Can be any key-value pair + type: object + highlight: + additionalProperties: true + description: Highlighted version of the matching document + type: object + highlights: + description: (Deprecated) Contains highlighted portions of the search fields + items: + $ref: '#/components/schemas/SearchHighlight' + type: array + text_match: + format: int64 + type: integer + text_match_info: + properties: + best_field_score: + type: string + best_field_weight: + type: integer + fields_matched: + type: integer + num_tokens_dropped: + format: int64 + type: integer + x-go-type: uint64 + score: + type: string + tokens_matched: + type: integer + typo_prefix_score: + type: integer + type: object + vector_distance: + description: Distance between the query vector and matching document's vector value + format: float + type: number + type: object + SearchSynonym: + allOf: + - $ref: '#/components/schemas/SearchSynonymSchema' + - properties: + id: + readOnly: true + type: string + required: + - id + type: object + SearchSynonymDeleteResponse: + properties: + id: + description: The id of the synonym that was deleted + type: string + required: + - id + type: object + SearchSynonymSchema: + properties: + locale: + description: Locale for the synonym, leave blank to use the standard tokenizer. + type: string + root: + description: For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. + type: string + symbols_to_index: + description: By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. + items: + type: string + type: array + synonyms: + description: Array of words that should be considered as synonyms. 
+ items: + type: string + type: array + required: + - synonyms + type: object + SearchSynonymsResponse: + properties: + synonyms: + items: + $ref: '#/components/schemas/SearchSynonym' + type: array + x-go-type: '[]*SearchSynonym' + required: + - synonyms + type: object + SnapshotParameters: + properties: + snapshot_path: + type: string + type: object + StemmingDictionary: + properties: + id: + description: Unique identifier for the dictionary + example: irregular-plurals + type: string + words: + description: List of word mappings in the dictionary + items: + properties: + root: + description: The root form of the word + example: person + type: string + word: + description: The word form to be stemmed + example: people + type: string + required: + - word + - root + type: object + type: array + required: + - id + - words + type: object + StopwordsSetRetrieveSchema: + example: | + {"stopwords": {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}} + properties: + stopwords: + $ref: '#/components/schemas/StopwordsSetSchema' + required: + - stopwords + type: object + StopwordsSetSchema: + example: | + {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"} + properties: + id: + type: string + locale: + type: string + stopwords: + items: + type: string + type: array + required: + - id + - stopwords + type: object + StopwordsSetUpsertSchema: + example: | + {"stopwords": ["Germany", "France", "Italy"], "locale": "en"} + properties: + locale: + type: string + stopwords: + items: + type: string + type: array + required: + - stopwords + type: object + StopwordsSetsRetrieveAllSchema: + example: | + {"stopwords": [{"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}]} + properties: + stopwords: + items: + $ref: '#/components/schemas/StopwordsSetSchema' + type: array + required: + - stopwords + type: object + SuccessStatus: + properties: + success: + type: boolean + required: + - success + type: object + VoiceQueryModelCollectionConfig: + description: | + Configuration for the voice query model + properties: + model_name: + example: ts/whisper/base.en + type: string + type: object + securitySchemes: + api_key_header: + in: header + name: X-TYPESENSE-API-KEY + type: apiKey +externalDocs: + description: Find out more about Typsesense + url: https://typesense.org +info: + description: An open source search engine for building delightful search experiences. + title: Typesense API + version: "28.0" +openapi: 3.0.3 +paths: + /aliases: + get: + description: List all aliases and the corresponding collections that they map to. 
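+      # Illustrative note (not part of the upstream spec): a typical alias upsert against the
+      # upsertAlias endpoint below, assuming a local server on the default port; the alias and
+      # collection names are made up, and the body shape is assumed to follow CollectionAliasSchema.
+      #   curl -X PUT -H "X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}" -H "Content-Type: application/json" \
+      #     "http://localhost:8108/aliases/companies" -d '{"collection_name": "companies_2025_07"}'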
+ operationId: getAliases + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAliasesResponse' + description: List of all collection aliases + summary: List all aliases + tags: + - collections + /aliases/{aliasName}: + delete: + operationId: deleteAlias + parameters: + - description: The name of the alias to delete + in: path + name: aliasName + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAlias' + description: Collection alias was deleted + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Alias not found + summary: Delete an alias + tags: + - collections + get: + description: Find out which collection an alias points to by fetching it + operationId: getAlias + parameters: + - description: The name of the alias to retrieve + in: path + name: aliasName + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAlias' + description: Collection alias fetched + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The alias was not found + summary: Retrieve an alias + tags: + - collections + put: + description: Create or update a collection alias. An alias is a virtual collection name that points to a real collection. If you're familiar with symbolic links on Linux, it's very similar to that. Aliases are useful when you want to reindex your data in the background on a new collection and switch your application to it without any changes to your code. + operationId: upsertAlias + parameters: + - description: The name of the alias to create/update + in: path + name: aliasName + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAliasSchema' + description: Collection alias to be created/updated + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAlias' + description: The collection alias was created/updated + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Alias not found + summary: Create or update a collection alias + tags: + - collections + /analytics/events: + post: + description: Sending events for analytics e.g rank search results based on popularity. 
+ operationId: createAnalyticsEvent + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsEventCreateSchema' + description: The Analytics event to be created + required: true + responses: + "201": + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsEventCreateResponse' + description: Analytics event successfully created + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + summary: Create an analytics event + tags: + - analytics + /analytics/rules: + get: + description: Retrieve the details of all analytics rules + operationId: retrieveAnalyticsRules + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRulesRetrieveSchema' + description: Analytics rules fetched + summary: Retrieves all analytics rules + tags: + - analytics + post: + description: When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. + operationId: createAnalyticsRule + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleSchema' + description: The Analytics rule to be created + required: true + responses: + "201": + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleSchema' + description: Analytics rule successfully created + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + summary: Creates an analytics rule + tags: + - analytics + /analytics/rules/{ruleName}: + delete: + description: Permanently deletes an analytics rule, given it's name + operationId: deleteAnalyticsRule + parameters: + - description: The name of the analytics rule to delete + in: path + name: ruleName + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleDeleteResponse' + description: Analytics rule deleted + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Analytics rule not found + summary: Delete an analytics rule + tags: + - analytics + get: + description: Retrieve the details of an analytics rule, given it's name + operationId: retrieveAnalyticsRule + parameters: + - description: The name of the analytics rule to retrieve + in: path + name: ruleName + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleSchema' + description: Analytics rule fetched + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Analytics rule not found + summary: Retrieves an analytics rule + tags: + - analytics + put: + description: Upserts an analytics rule with the given name. 
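+      # Illustrative note (not part of the upstream spec): upserting a rule by name, assuming a local
+      # server on the default port. The rule name and file name are made up; the body must conform to
+      # the AnalyticsRuleUpsertSchema referenced by this operation's requestBody.
+      #   curl -X PUT -H "X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}" -H "Content-Type: application/json" \
+      #     "http://localhost:8108/analytics/rules/product_queries_aggregation" -d @rule.json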
+ operationId: upsertAnalyticsRule + parameters: + - description: The name of the analytics rule to upsert + in: path + name: ruleName + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleUpsertSchema' + description: The Analytics rule to be upserted + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleSchema' + description: Analytics rule successfully upserted + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + summary: Upserts an analytics rule + tags: + - analytics + /collections: + get: + description: Returns a summary of all your collections. The collections are returned sorted by creation date, with the most recent collections appearing first. + operationId: getCollections + responses: + "200": + content: + application/json: + schema: + items: + $ref: '#/components/schemas/CollectionResponse' + type: array + x-go-type: '[]*CollectionResponse' + description: List of all collections + summary: List all collections + tags: + - collections + post: + description: When a collection is created, we give it a name and describe the fields that will be indexed from the documents added to the collection. + operationId: createCollection + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionSchema' + description: The collection object to be created + required: true + responses: + "201": + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionResponse' + description: Collection successfully created + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + "409": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Collection already exists + summary: Create a new collection + tags: + - collections + /collections/{collectionName}: + delete: + description: Permanently drops a collection. This action cannot be undone. For large collections, this might have an impact on read latencies. + operationId: deleteCollection + parameters: + - description: The name of the collection to delete + in: path + name: collectionName + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionResponse' + description: Collection deleted + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Collection not found + summary: Delete a collection + tags: + - collections + get: + description: Retrieve the details of a collection, given its name. + operationId: getCollection + parameters: + - description: The name of the collection to retrieve + in: path + name: collectionName + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionResponse' + description: Collection fetched + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Collection not found + summary: Retrieve a single collection + tags: + - collections + patch: + description: Update a collection's schema to modify the fields and their types. 
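+      # Illustrative note (not part of the upstream spec): dropping and re-adding a field through this
+      # PATCH endpoint, assuming a local server on the default port. The payload shape (a fields array
+      # with a drop flag) follows Typesense's documented schema-alteration convention and is an
+      # assumption here, since CollectionUpdateSchema is defined elsewhere in this file.
+      #   curl -X PATCH -H "X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}" -H "Content-Type: application/json" \
+      #     "http://localhost:8108/collections/companies" \
+      #     -d '{"fields": [{"name": "num_employees", "drop": true}, {"name": "num_employees", "type": "int64"}]}'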
+ operationId: updateCollection + parameters: + - description: The name of the collection to update + in: path + name: collectionName + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionUpdateSchema' + description: The collection schema object with fields to be updated + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionUpdateSchema' + description: The updated partial collection schema + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The collection was not found + summary: Update a collection + tags: + - collections + /collections/{collectionName}/documents: + delete: + description: Delete a bunch of documents that match a specific filter condition. Use the `batch_size` parameter to control the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. + operationId: deleteDocuments + parameters: + - description: The name of the collection to delete documents from + in: path + name: collectionName + required: true + schema: + type: string + - in: query + name: batch_size + schema: + type: integer + - in: query + name: filter_by + schema: + type: string + - in: query + name: ignore_not_found + schema: + type: boolean + - in: query + name: truncate + schema: + type: boolean + responses: + "200": + content: + application/json: + schema: + properties: + num_deleted: + type: integer + required: + - num_deleted + type: object + description: Documents successfully deleted + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Collection not found + summary: Delete a bunch of documents + tags: + - documents + patch: + description: The filter_by query parameter is used to specify a condition against which the documents are matched. The request body contains the fields that should be updated for any documents that match the filter condition. This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later. + operationId: updateDocuments + parameters: + - description: The name of the collection to update documents in + in: path + name: collectionName + required: true + schema: + type: string + - in: query + name: filter_by + schema: + type: string + requestBody: + content: + application/json: + schema: + description: Can be any key-value pair + type: object + x-go-type: interface{} + description: The document fields to be updated + required: true + responses: + "200": + content: + application/json: + schema: + properties: + num_updated: + description: The number of documents that have been updated + example: 1 + type: integer + required: + - num_updated + type: object + description: The response contains a single field, `num_updated`, indicating the number of documents affected.
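+            # Illustrative note (not part of the upstream spec): bulk-updating every document that matches
+            # a filter via this PATCH endpoint, assuming a local server on the default port; the collection,
+            # filter expression and field are made up.
+            #   curl -X PATCH -H "X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}" -H "Content-Type: application/json" \
+            #     "http://localhost:8108/collections/companies/documents?filter_by=country:USA" -d '{"verified": true}'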
+ "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The collection was not found + summary: Update documents with conditional query + tags: + - documents + post: + description: A document to be indexed in a given collection must conform to the schema of the collection. + operationId: indexDocument + parameters: + - description: The name of the collection to add the document to + in: path + name: collectionName + required: true + schema: + type: string + - description: Additional action to perform + in: query + name: action + schema: + $ref: '#/components/schemas/IndexAction' + example: upsert + type: string + - description: Dealing with Dirty Data + in: query + name: dirty_values + schema: + $ref: '#/components/schemas/DirtyValues' + requestBody: + content: + application/json: + schema: + description: Can be any key-value pair + type: object + x-go-type: interface{} + description: The document object to be indexed + required: true + responses: + "201": + content: + application/json: + schema: + description: Can be any key-value pair + type: object + description: Document successfully created/indexed + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Collection not found + summary: Index a document + tags: + - documents + /collections/{collectionName}/documents/{documentId}: + delete: + description: Delete an individual document from a collection by using its ID. + operationId: deleteDocument + parameters: + - description: The name of the collection to search for the document under + in: path + name: collectionName + required: true + schema: + type: string + - description: The Document ID + in: path + name: documentId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + description: Can be any key-value pair + type: object + description: The document referenced by the ID was deleted + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The document or collection was not found + summary: Delete a document + tags: + - documents + get: + description: Fetch an individual document from a collection by using its ID. + operationId: getDocument + parameters: + - description: The name of the collection to search for the document under + in: path + name: collectionName + required: true + schema: + type: string + - description: The Document ID + in: path + name: documentId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + description: Can be any key-value pair + type: object + description: The document referenced by the ID + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The document or collection was not found + summary: Retrieve a document + tags: + - documents + patch: + description: Update an individual document from a collection by using its ID. The update can be partial.
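+      # Illustrative note (not part of the upstream spec): a partial update of a single document by ID via
+      # this PATCH endpoint; only the fields present in the body are changed. Host, collection and values
+      # are assumptions.
+      #   curl -X PATCH -H "X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}" -H "Content-Type: application/json" \
+      #     "http://localhost:8108/collections/companies/documents/124" -d '{"num_employees": 5500}'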
+ operationId: updateDocument + parameters: + - description: The name of the collection to search for the document under + in: path + name: collectionName + required: true + schema: + type: string + - description: The Document ID + in: path + name: documentId + required: true + schema: + type: string + - description: Dealing with Dirty Data + in: query + name: dirty_values + schema: + $ref: '#/components/schemas/DirtyValues' + requestBody: + content: + application/json: + schema: + description: Can be any key-value pair + type: object + x-go-type: interface{} + description: The document object with fields to be updated + required: true + responses: + "200": + content: + application/json: + schema: + description: Can be any key-value pair + type: object + description: The document referenced by the ID was updated + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The document or collection was not found + summary: Update a document + tags: + - documents + /collections/{collectionName}/documents/export: + get: + description: Export all documents in a collection in JSON lines format. + operationId: exportDocuments + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + - in: query + name: exclude_fields + schema: + type: string + - in: query + name: filter_by + schema: + type: string + - in: query + name: include_fields + schema: + type: string + responses: + "200": + content: + application/octet-stream: + schema: + example: | + {"id": "124", "company_name": "Stark Industries", "num_employees": 5215, "country": "US"} + {"id": "125", "company_name": "Future Technology", "num_employees": 1232,"country": "UK"} + {"id": "126", "company_name": "Random Corp.", "num_employees": 531,"country": "AU"} + type: string + description: Exports all the documents in a given collection. + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The collection was not found + summary: Export all documents in a collection + tags: + - documents + /collections/{collectionName}/documents/import: + post: + description: The documents to be imported must be formatted in a newline delimited JSON structure. You can feed the output file from a Typesense export operation directly as import. + operationId: importDocuments + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + - in: query + name: action + schema: + $ref: '#/components/schemas/IndexAction' + - in: query + name: batch_size + schema: + type: integer + - in: query + name: dirty_values + schema: + $ref: '#/components/schemas/DirtyValues' + - in: query + name: remote_embedding_batch_size + schema: + type: integer + - in: query + name: return_doc + schema: + type: boolean + - in: query + name: return_id + schema: + type: boolean + requestBody: + content: + application/octet-stream: + schema: + description: The JSONL file to import + type: string + description: The json array of documents or the JSONL file to import + required: true + responses: + "200": + content: + application/octet-stream: + schema: + example: | + {"success": true} + {"success": false, "error": "Bad JSON.", "document": "[bad doc"} + type: string + description: Result of the import operation. Each line of the response indicates the result of each document present in the request body (in the same order). 
If the import of a single document fails, it does not affect the other documents. If there is a failure, the response line will include a corresponding error message and as well as the actual document content. + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The collection was not found + summary: Import documents into a collection + tags: + - documents + /collections/{collectionName}/documents/search: + get: + description: Search for documents in a collection that match the search criteria. + operationId: searchCollection + parameters: + - description: The name of the collection to search for the document under + in: path + name: collectionName + required: true + schema: + type: string + - in: query + name: cache_ttl + schema: + type: integer + - in: query + name: conversation + schema: + type: boolean + - in: query + name: conversation_id + schema: + type: string + - in: query + name: conversation_model_id + schema: + type: string + - in: query + name: drop_tokens_mode + schema: + $ref: '#/components/schemas/DropTokensMode' + - in: query + name: drop_tokens_threshold + schema: + type: integer + - in: query + name: enable_highlight_v1 + schema: + type: boolean + - in: query + name: enable_overrides + schema: + type: boolean + - in: query + name: enable_synonyms + schema: + type: boolean + - in: query + name: enable_typos_for_alpha_numerical_tokens + schema: + type: boolean + - in: query + name: enable_typos_for_numerical_tokens + schema: + type: boolean + - in: query + name: exclude_fields + schema: + type: string + - in: query + name: exhaustive_search + schema: + type: boolean + - in: query + name: facet_by + schema: + type: string + - in: query + name: facet_query + schema: + type: string + - in: query + name: facet_return_parent + schema: + type: string + - in: query + name: facet_strategy + schema: + type: string + - in: query + name: filter_by + schema: + type: string + - in: query + name: filter_curated_hits + schema: + type: boolean + - in: query + name: group_by + schema: + type: string + - in: query + name: group_limit + schema: + type: integer + - in: query + name: group_missing_values + schema: + type: boolean + - in: query + name: hidden_hits + schema: + type: string + - in: query + name: highlight_affix_num_tokens + schema: + type: integer + - in: query + name: highlight_end_tag + schema: + type: string + - in: query + name: highlight_fields + schema: + type: string + - in: query + name: highlight_full_fields + schema: + type: string + - in: query + name: highlight_start_tag + schema: + type: string + - in: query + name: include_fields + schema: + type: string + - in: query + name: infix + schema: + type: string + - in: query + name: limit + schema: + type: integer + - in: query + name: max_candidates + schema: + type: integer + - in: query + name: max_extra_prefix + schema: + type: integer + - in: query + name: max_extra_suffix + schema: + type: integer + - in: query + name: max_facet_values + schema: + type: integer + - in: query + name: max_filter_by_candidates + schema: + type: integer + - in: query + name: min_len_1typo + schema: + type: integer + - in: query + name: min_len_2typo + schema: + type: integer + - in: query + name: num_typos + schema: + type: string + - in: query + name: offset + schema: + type: integer + - in: query + name: override_tags + schema: + 
type: string + - in: query + name: page + schema: + type: integer + - in: query + name: per_page + schema: + type: integer + - in: query + name: pinned_hits + schema: + type: string + - in: query + name: pre_segmented_query + schema: + type: boolean + - in: query + name: prefix + schema: + type: string + - in: query + name: preset + schema: + type: string + - in: query + name: prioritize_exact_match + schema: + type: boolean + - in: query + name: prioritize_num_matching_fields + schema: + type: boolean + - in: query + name: prioritize_token_position + schema: + type: boolean + - in: query + name: q + schema: + type: string + - in: query + name: query_by + schema: + type: string + - in: query + name: query_by_weights + schema: + type: string + - in: query + name: remote_embedding_num_tries + schema: + type: integer + - in: query + name: remote_embedding_timeout_ms + schema: + type: integer + - in: query + name: search_cutoff_ms + schema: + type: integer + - in: query + name: snippet_threshold + schema: + type: integer + - in: query + name: sort_by + schema: + type: string + - in: query + name: split_join_tokens + schema: + type: string + - in: query + name: stopwords + schema: + type: string + - in: query + name: synonym_num_typos + schema: + type: integer + - in: query + name: synonym_prefix + schema: + type: boolean + - in: query + name: text_match_type + schema: + type: string + - in: query + name: typo_tokens_threshold + schema: + type: integer + - in: query + name: use_cache + schema: + type: boolean + - in: query + name: vector_query + schema: + type: string + - in: query + name: voice_query + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SearchResult' + description: Search results + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The collection or field was not found + summary: Search for documents in a collection + tags: + - documents + /collections/{collectionName}/overrides: + get: + operationId: getSearchOverrides + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverridesResponse' + description: List of all search overrides + summary: List all collection overrides + tags: + - documents + - curation + /collections/{collectionName}/overrides/{overrideId}: + delete: + operationId: deleteSearchOverride + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + - description: The ID of the search override to delete + in: path + name: overrideId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverrideDeleteResponse' + description: The ID of the deleted search override + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Search override not found + summary: Delete an override associated with a collection + tags: + - documents + - curation + get: + description: Retrieve the details of a search override, given its id. 
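+      # Illustrative note (not part of the upstream spec): upserting a curation override through the
+      # upsertSearchOverride endpoint below, pinning one document for an exact query match. Host,
+      # collection, override id and document id are made up; the body follows SearchOverrideSchema above.
+      #   curl -X PUT -H "X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}" -H "Content-Type: application/json" \
+      #     "http://localhost:8108/collections/companies/overrides/promote-stark" \
+      #     -d '{"rule": {"query": "stark", "match": "exact"}, "includes": [{"id": "124", "position": 1}]}'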
+ operationId: getSearchOverride + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + - description: The id of the search override + in: path + name: overrideId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverride' + description: Search override fetched + summary: Retrieve a single search override + tags: + - documents + - override + put: + description: Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. + operationId: upsertSearchOverride + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + - description: The ID of the search override to create/update + in: path + name: overrideId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverrideSchema' + description: The search override object to be created/updated + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverride' + description: Created/updated search override + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Search override not found + summary: Create or update an override to promote certain documents over others + tags: + - documents + - curation + /collections/{collectionName}/synonyms: + get: + operationId: getSearchSynonyms + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonymsResponse' + description: List of all search synonyms + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Search synonyms was not found + summary: List all collection synonyms + tags: + - synonyms + /collections/{collectionName}/synonyms/{synonymId}: + delete: + operationId: deleteSearchSynonym + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + - description: The ID of the search synonym to delete + in: path + name: synonymId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonymDeleteResponse' + description: The ID of the deleted search synonym + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Search synonym not found + summary: Delete a synonym associated with a collection + tags: + - synonyms + get: + description: Retrieve the details of a search synonym, given its id. 
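+      # Illustrative note (not part of the upstream spec): a one-way synonym created through the
+      # upsertSearchSynonym endpoint below, where "blazer" is the root word (see SearchSynonymSchema
+      # above). Host, collection, synonym id and words are made up.
+      #   curl -X PUT -H "X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}" -H "Content-Type: application/json" \
+      #     "http://localhost:8108/collections/products/synonyms/coat-synonyms" \
+      #     -d '{"root": "blazer", "synonyms": ["coat", "jacket"]}'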
+ operationId: getSearchSynonym + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + - description: The id of the search synonym + in: path + name: synonymId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonym' + description: Search synonym fetched + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Search synonym was not found + summary: Retrieve a single search synonym + tags: + - synonyms + put: + description: Create or update a synonym to define search terms that should be considered equivalent. + operationId: upsertSearchSynonym + parameters: + - description: The name of the collection + in: path + name: collectionName + required: true + schema: + type: string + - description: The ID of the search synonym to create/update + in: path + name: synonymId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonymSchema' + description: The search synonym object to be created/updated + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonym' + description: Created/updated search synonym + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Search synonym was not found + summary: Create or update a synonym + tags: + - synonyms + /conversations/models: + get: + description: Retrieve all conversation models + operationId: retrieveAllConversationModels + responses: + "200": + content: + application/json: + schema: + items: + $ref: '#/components/schemas/ConversationModelSchema' + type: array + x-go-type: '[]*ConversationModelSchema' + description: List of all conversation models + summary: List all conversation models + tags: + - conversations + post: + description: Create a Conversation Model + operationId: createConversationModel + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelCreateSchema' + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelSchema' + description: Created Conversation Model + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + tags: + - conversations + /conversations/models/{modelId}: + delete: + description: Delete a conversation model + operationId: deleteConversationModel + parameters: + - description: The id of the conversation model to delete + in: path + name: modelId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelSchema' + description: The conversation model was successfully deleted + summary: Delete a conversation model + tags: + - conversations + get: + description: Retrieve a conversation model + operationId: retrieveConversationModel + parameters: + - description: The id of the conversation model to retrieve + in: path + name: modelId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelSchema' + description: A conversation model + summary: Retrieve a conversation model + tags: + - conversations + put: + 
description: Update a conversation model + operationId: updateConversationModel + parameters: + - description: The id of the conversation model to update + in: path + name: modelId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelUpdateSchema' + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelSchema' + description: The conversation model was successfully updated + summary: Update a conversation model + tags: + - conversations + /debug: + get: + description: Print debugging information + operationId: debug + responses: + "200": + content: + application/json: + schema: + properties: + version: + type: string + type: object + description: Debugging information + summary: Print debugging information + tags: + - debug + /health: + get: + description: Checks if Typesense server is ready to accept requests. + operationId: health + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/HealthStatus' + description: Search service is ready for requests. + summary: Checks if Typesense server is ready to accept requests. + tags: + - health + /keys: + get: + operationId: getKeys + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeysResponse' + description: List of all keys + summary: Retrieve (metadata about) all keys. + tags: + - keys + post: + description: Create an API Key with fine-grain access control. You can restrict access on both a per-collection and per-action level. The generated key is returned only during creation. You want to store this key carefully in a secure place. + operationId: createKey + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeySchema' + description: The object that describes API key scope + responses: + "201": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKey' + description: Created API key + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + "409": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: API key generation conflict + summary: Create an API Key + tags: + - keys + /keys/{keyId}: + delete: + operationId: deleteKey + parameters: + - description: The ID of the key to delete + in: path + name: keyId + required: true + schema: + format: int64 + type: integer + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeyDeleteResponse' + description: The key referenced by the ID + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Key not found + summary: Delete an API key given its ID. + tags: + - keys + get: + description: Retrieve (metadata about) a key. Only the key prefix is returned when you retrieve a key. Due to security reasons, only the create endpoint returns the full API key. 
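+      # Illustrative note (not part of the upstream spec): creating a search-only key scoped to a single
+      # collection via the createKey endpoint above, assuming a local server on the default port. The
+      # actions/collections values follow Typesense's key-scoping convention and are assumptions here,
+      # since ApiKeySchema is defined elsewhere in this file.
+      #   curl -X POST -H "X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}" -H "Content-Type: application/json" \
+      #     "http://localhost:8108/keys" \
+      #     -d '{"description": "Search-only key", "actions": ["documents:search"], "collections": ["companies"]}'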
+ operationId: getKey + parameters: + - description: The ID of the key to retrieve + in: path + name: keyId + required: true + schema: + format: int64 + type: integer + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKey' + description: The key referenced by the ID + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: The key was not found + summary: Retrieve (metadata about) a key + tags: + - keys + /metrics.json: + get: + description: Retrieve the metrics. + operationId: retrieveMetrics + responses: + "200": + content: + application/json: + schema: + type: object + description: Metrics fetched. + summary: Get current RAM, CPU, Disk & Network usage metrics. + tags: + - operations + /multi_search: + post: + description: This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. + operationId: multiSearch + parameters: + - in: query + name: cache_ttl + schema: + type: integer + - in: query + name: conversation + schema: + type: boolean + - in: query + name: conversation_id + schema: + type: string + - in: query + name: conversation_model_id + schema: + type: string + - in: query + name: drop_tokens_mode + schema: + $ref: '#/components/schemas/DropTokensMode' + - in: query + name: drop_tokens_threshold + schema: + type: integer + - in: query + name: enable_highlight_v1 + schema: + type: boolean + - in: query + name: enable_overrides + schema: + type: boolean + - in: query + name: enable_synonyms + schema: + type: boolean + - in: query + name: enable_typos_for_alpha_numerical_tokens + schema: + type: boolean + - in: query + name: enable_typos_for_numerical_tokens + schema: + type: boolean + - in: query + name: exclude_fields + schema: + type: string + - in: query + name: exhaustive_search + schema: + type: boolean + - in: query + name: facet_by + schema: + type: string + - in: query + name: facet_query + schema: + type: string + - in: query + name: facet_return_parent + schema: + type: string + - in: query + name: facet_strategy + schema: + type: string + - in: query + name: filter_by + schema: + type: string + - in: query + name: filter_curated_hits + schema: + type: boolean + - in: query + name: group_by + schema: + type: string + - in: query + name: group_limit + schema: + type: integer + - in: query + name: group_missing_values + schema: + type: boolean + - in: query + name: hidden_hits + schema: + type: string + - in: query + name: highlight_affix_num_tokens + schema: + type: integer + - in: query + name: highlight_end_tag + schema: + type: string + - in: query + name: highlight_fields + schema: + type: string + - in: query + name: highlight_full_fields + schema: + type: string + - in: query + name: highlight_start_tag + schema: + type: string + - in: query + name: include_fields + schema: + type: string + - in: query + name: infix + schema: + type: string + - in: query + name: limit + schema: + type: integer + - in: query + name: max_candidates + schema: + type: integer + - in: query + name: max_extra_prefix + schema: + type: integer + - in: query + name: max_extra_suffix + schema: + type: integer + - in: query + name: max_facet_values + schema: + type: integer + - in: query + name: max_filter_by_candidates + schema: + type: integer + - in: query + name: min_len_1typo + schema: + type: integer + - 
in: query + name: min_len_2typo + schema: + type: integer + - in: query + name: num_typos + schema: + type: string + - in: query + name: offset + schema: + type: integer + - in: query + name: override_tags + schema: + type: string + - in: query + name: page + schema: + type: integer + - in: query + name: per_page + schema: + type: integer + - in: query + name: pinned_hits + schema: + type: string + - in: query + name: pre_segmented_query + schema: + type: boolean + - in: query + name: prefix + schema: + type: string + - in: query + name: preset + schema: + type: string + - in: query + name: prioritize_exact_match + schema: + type: boolean + - in: query + name: prioritize_num_matching_fields + schema: + type: boolean + - in: query + name: prioritize_token_position + schema: + type: boolean + - in: query + name: q + schema: + type: string + - in: query + name: query_by + schema: + type: string + - in: query + name: query_by_weights + schema: + type: string + - in: query + name: remote_embedding_num_tries + schema: + type: integer + - in: query + name: remote_embedding_timeout_ms + schema: + type: integer + - in: query + name: search_cutoff_ms + schema: + type: integer + - in: query + name: snippet_threshold + schema: + type: integer + - in: query + name: sort_by + schema: + type: string + - in: query + name: split_join_tokens + schema: + type: string + - in: query + name: stopwords + schema: + type: string + - in: query + name: synonym_num_typos + schema: + type: integer + - in: query + name: synonym_prefix + schema: + type: boolean + - in: query + name: text_match_type + schema: + type: string + - in: query + name: typo_tokens_threshold + schema: + type: integer + - in: query + name: use_cache + schema: + type: boolean + - in: query + name: vector_query + schema: + type: string + - in: query + name: voice_query + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MultiSearchSearchesParameter' + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/MultiSearchResult' + description: Search results + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + summary: send multiple search requests in a single HTTP request + tags: + - documents + /operations/schema_changes: + get: + description: Returns the status of any ongoing schema change operations. If no schema changes are in progress, returns an empty response. + operationId: getSchemaChanges + responses: + "200": + content: + application/json: + schema: + items: + $ref: '#/components/schemas/SchemaChangeStatus' + type: array + description: List of schema changes in progress + summary: Get the status of in-progress schema change operations + tags: + - operations + /operations/snapshot: + post: + description: Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. You can then backup the snapshot directory that gets created and later restore it as a data directory, as needed. + operationId: takeSnapshot + parameters: + - description: The directory on the server where the snapshot should be saved. + in: query + name: snapshot_path + required: true + schema: + type: string + responses: + "201": + content: + application/json: + schema: + $ref: '#/components/schemas/SuccessStatus' + description: Snapshot is created. 
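A minimal sketch of how a client might call the `/multi_search` operation defined above — one federated request across two collections in a single HTTP round trip. It assumes a local node, a placeholder API key, and hypothetical collection and field names; only the body shape follows the `MultiSearchSearchesParameter` schema.

```rust
use serde_json::json;

// Sketch: federated search across two collections via POST /multi_search.
// Collection and field names are hypothetical; URL and API key are placeholders.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let body = json!({
        "searches": [
            { "collection": "products", "q": "shoe", "query_by": "name" },
            { "collection": "brands",   "q": "nike", "query_by": "name" }
        ]
    });

    let resp = reqwest::Client::new()
        .post("http://localhost:8108/multi_search")
        // Parameters common to all searches can also be sent once as query parameters.
        .query(&[("per_page", "10")])
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&body)
        .send()
        .await?
        .error_for_status()?;

    println!("{}", resp.text().await?);
    Ok(())
}
```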
+ summary: Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. + tags: + - operations + /operations/vote: + post: + description: Triggers a follower node to initiate the raft voting process, which triggers leader re-election. The follower node that you run this operation against will become the new leader, once this command succeeds. + operationId: vote + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/SuccessStatus' + description: Re-election is performed. + summary: Triggers a follower node to initiate the raft voting process, which triggers leader re-election. + tags: + - operations + /presets: + get: + description: Retrieve the details of all presets + operationId: retrieveAllPresets + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/PresetsRetrieveSchema' + description: Presets fetched. + summary: Retrieves all presets. + tags: + - presets + /presets/{presetId}: + delete: + description: Permanently deletes a preset, given it's name. + operationId: deletePreset + parameters: + - description: The ID of the preset to delete. + example: listing_view + in: path + name: presetId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/PresetDeleteSchema' + description: Preset deleted. + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Preset not found. + summary: Delete a preset. + tags: + - presets + get: + description: Retrieve the details of a preset, given it's name. + operationId: retrievePreset + parameters: + - description: The ID of the preset to retrieve. + example: listing_view + in: path + name: presetId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/PresetSchema' + description: Preset fetched. + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Preset not found. + summary: Retrieves a preset. + tags: + - presets + put: + description: Create or update an existing preset. + operationId: upsertPreset + parameters: + - description: The name of the preset set to upsert. + example: listing_view + in: path + name: presetId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/PresetUpsertSchema' + description: The stopwords set to upsert. + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/PresetSchema' + description: Preset successfully upserted. + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details. + summary: Upserts a preset. + tags: + - presets + /stats.json: + get: + description: Retrieve the stats about API endpoints. + operationId: retrieveAPIStats + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/APIStatsResponse' + description: Stats fetched. + summary: Get stats about API endpoints. + tags: + - operations + /stemming/dictionaries: + get: + description: Retrieve a list of all available stemming dictionaries. 
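A minimal sketch of the preset upsert defined above: the stored `value` is a bag of search parameters that callers can later reference via `preset=listing_view`. The preset name comes from the spec's example; the field names are hypothetical and the node URL and API key are placeholders.

```rust
use serde_json::json;

// Sketch: PUT /presets/{presetId} with a PresetUpsertSchema-shaped body.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let body = json!({
        "value": { "query_by": "title,description", "sort_by": "popularity:desc" }
    });

    reqwest::Client::new()
        .put("http://localhost:8108/presets/listing_view")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&body)
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```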
+ operationId: listStemmingDictionaries + responses: + "200": + content: + application/json: + schema: + properties: + dictionaries: + example: + - irregular-plurals + - company-terms + items: + type: string + type: array + type: object + description: List of all dictionaries + summary: List all stemming dictionaries + tags: + - stemming + /stemming/dictionaries/{dictionaryId}: + get: + description: Fetch details of a specific stemming dictionary. + operationId: getStemmingDictionary + parameters: + - description: The ID of the dictionary to retrieve + example: irregular-plurals + in: path + name: dictionaryId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/StemmingDictionary' + description: Stemming dictionary details + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Dictionary not found + summary: Retrieve a stemming dictionary + tags: + - stemming + /stemming/dictionaries/import: + post: + description: Upload a JSONL file containing word mappings to create or update a stemming dictionary. + operationId: importStemmingDictionary + parameters: + - description: The ID to assign to the dictionary + example: irregular-plurals + in: query + name: id + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + example: | + {"word": "people", "root": "person"} + {"word": "children", "root": "child"} + type: string + description: The JSONL file containing word mappings + required: true + responses: + "200": + content: + application/octet-stream: + schema: + example: | + {"word": "people", "root": "person"} {"word": "children", "root": "child"} + type: string + description: Dictionary successfully imported + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + summary: Import a stemming dictionary + tags: + - stemming + /stopwords: + get: + description: Retrieve the details of all stopwords sets + operationId: retrieveStopwordsSets + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/StopwordsSetsRetrieveAllSchema' + description: Stopwords sets fetched. + summary: Retrieves all stopwords sets. + tags: + - stopwords + /stopwords/{setId}: + delete: + description: Permanently deletes a stopwords set, given it's name. + operationId: deleteStopwordsSet + parameters: + - description: The ID of the stopwords set to delete. + example: countries + in: path + name: setId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + example: | + {"id": "countries"} + properties: + id: + type: string + required: + - id + type: object + description: Stopwords set rule deleted. + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Stopwords set not found. + summary: Delete a stopwords set. + tags: + - stopwords + get: + description: Retrieve the details of a stopwords set, given it's name. + operationId: retrieveStopwordsSet + parameters: + - description: The ID of the stopwords set to retrieve. + example: countries + in: path + name: setId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/StopwordsSetRetrieveSchema' + description: Stopwords set fetched. 
+ "404": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Stopwords set not found. + summary: Retrieves a stopwords set. + tags: + - stopwords + put: + description: When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. + operationId: upsertStopwordsSet + parameters: + - description: The ID of the stopwords set to upsert. + example: countries + in: path + name: setId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/StopwordsSetUpsertSchema' + description: The stopwords set to upsert. + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/StopwordsSetSchema' + description: Stopwords set successfully upserted. + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details. + summary: Upserts a stopwords set. + tags: + - stopwords +security: + - api_key_header: [] +tags: + - description: A collection is defined by a schema + externalDocs: + description: Find out more + url: https://typesense.org/api/#create-collection + name: collections + - description: A document is an individual record to be indexed and belongs to a collection + externalDocs: + description: Find out more + url: https://typesense.org/api/#index-document + name: documents + - description: Hand-curate search results based on conditional business rules + externalDocs: + description: Find out more + url: https://typesense.org/docs/0.23.0/api/#curation + name: curation + - description: Typesense can aggregate search queries for both analytics purposes and for query suggestions. 
+ externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/analytics-query-suggestions.html + name: analytics + - description: Manage API Keys with fine-grain access control + externalDocs: + description: Find out more + url: https://typesense.org/docs/0.23.0/api/#api-keys + name: keys + - description: Debugging information + name: debug + - description: Manage Typesense cluster + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/cluster-operations.html + name: operations + - description: Manage stopwords sets + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/stopwords.html + name: stopwords + - description: Store and reference search parameters + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/search.html#presets + name: presets + - description: Conversational Search (RAG) + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/conversational-search-rag.html + name: conversations + - description: Manage synonyms + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/synonyms.html + name: synonyms + - description: Manage stemming dictionaries + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/stemming.html + name: stemming diff --git a/typesense/Cargo.toml b/typesense/Cargo.toml index 1574b32..65cecf0 100644 --- a/typesense/Cargo.toml +++ b/typesense/Cargo.toml @@ -2,7 +2,7 @@ name = "typesense" version = "0.1.0" authors = ["Typesense "] -edition = "2018" +edition = "2021" license = "Apache-2.0" description = "WIP client for typesense" @@ -25,6 +25,9 @@ serde_json = "1.0" sha2 = "0.10" typesense_derive = { version = "0.1.0", path = "../typesense_derive", optional = true } typesense_codegen = { version = "0.25.0", path = "../typesense_codegen" } +reqwest-retry = "0.7.0" +reqwest = { version = "0.11", features = ["json"] } +reqwest-middleware = { version = "0.3", features = ["json"] } [dev-dependencies] dotenvy = "0.15" @@ -46,3 +49,4 @@ required-features = ["derive"] [[test]] name = "api_tests" path = "tests/api/lib.rs" + diff --git a/typesense/src/client/collections.rs b/typesense/src/client/collections.rs new file mode 100644 index 0000000..cf9a6b0 --- /dev/null +++ b/typesense/src/client/collections.rs @@ -0,0 +1,26 @@ +use super::Client; // Use the parent module's Client +use typesense_codegen::apis::{collections_api, Error}; +use typesense_codegen::models::{CollectionResponse, CollectionSchema}; + +// This struct holds a temporary reference to the main client. +// The lifetime parameter `'c` ensures it cannot outlive the Client it borrows from. +pub struct Collections<'c> { + pub client: &'c Client, +} + +// Implement the public methods on the Collections struct. +impl<'c> Collections<'c> { + /// Retrieve the details of a collection, given its name. + pub async fn get(&self, collection_name: &str) -> Result<CollectionResponse, Error<collections_api::GetCollectionError>> { + // It calls back to the generic helper method on the main client. + let path = format!("/collections/{}", collection_name); + self.client.get(&path, None).await + } + + /// When a collection is created, we give it a name and describe the fields. + pub async fn create(&self, schema: &CollectionSchema) -> Result<CollectionResponse, Error<collections_api::CreateCollectionError>> { + self.client.post("/collections", schema, None).await + } + + // ... all other collection-related methods go here ...
+} diff --git a/typesense/src/client/documents.rs b/typesense/src/client/documents.rs new file mode 100644 index 0000000..e69de29 diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs new file mode 100644 index 0000000..cb27285 --- /dev/null +++ b/typesense/src/client/mod.rs @@ -0,0 +1,144 @@ +// in src/client/mod.rs + +// Make the sub-modules public within the client module +pub mod collections; +pub mod documents; + +// Re-export the namespace structs for easier access +pub use collections::Collections; +pub use documents::Documents; + +use reqwest::Url; +use reqwest_middleware::{ClientBuilder, ClientWithMiddleware}; +use reqwest_retry::policies::ExponentialBackoff; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::time::Duration; +use typesense_codegen::apis::Error; // Use the generated Error type + +// Public configuration for the user +pub struct MultiNodeConfiguration { + pub nodes: Vec<Url>, + pub api_key: String, + pub retry_policy: ExponentialBackoff, + pub connection_timeout: Duration, +} + +// The main public client +pub struct Client { + config: MultiNodeConfiguration, + http_client: ClientWithMiddleware, // The client that handles retries on a single node + current_node_index: AtomicUsize, +} +// In the `impl Client` block +impl Client { + pub fn new(config: MultiNodeConfiguration) -> Self { + let http_client = ClientBuilder::new( + reqwest::Client::builder() + .timeout(config.connection_timeout) + .build() + .expect("Failed to build reqwest client"), + ) + // The retry middleware will handle transient errors for a SINGLE request + .with(reqwest_retry::RetryTransientMiddleware::new_with_policy(config.retry_policy.clone())) + .build(); + + Self { + config, + http_client, + current_node_index: AtomicUsize::new(0), + } + } + + // Simple round-robin node selection for trying the next node on failure + fn get_next_node(&self) -> &Url { + let index = self.current_node_index.fetch_add(1, Ordering::Relaxed); + &self.config.nodes[index % self.config.nodes.len()] + } + + /// A generic POST request handler. + /// It tries each node in sequence if the previous one fails with a retriable error. + async fn post<T, U, E>( + &self, + path: &str, + body: &T, + // We accept optional query params now + query_params: Option<&[(&str, String)]>, + ) -> Result<U, Error<E>> + where + T: serde::Serialize + ?Sized, + U: for<'de> serde::Deserialize<'de>, + E: for<'de> serde::Deserialize<'de> + std::fmt::Debug, + { + self.execute_request(reqwest::Method::POST, path, Some(body), query_params).await + } + + // You would create similar `get`, `delete`, and `patch` helpers + async fn get<U, E>(&self, path: &str, query_params: Option<&[(&str, String)]>) -> Result<U, Error<E>> + where + U: for<'de> serde::Deserialize<'de>, + E: for<'de> serde::Deserialize<'de> + std::fmt::Debug, + { + self.execute_request::<(), U, E>(reqwest::Method::GET, path, None, query_params).await + } + + /// The single, generic request executor containing all the logic.
+ async fn execute_request<T, U, E>(&self, method: reqwest::Method, path: &str, body: Option<&T>, query_params: Option<&[(&str, String)]>) -> Result<U, Error<E>> + where + T: serde::Serialize + ?Sized, + U: for<'de> serde::Deserialize<'de>, + E: for<'de> serde::Deserialize<'de> + std::fmt::Debug, + { + let mut last_error: Option<Error<E>> = None; + + for _ in 0..self.config.nodes.len() { + let node_url = self.get_next_node(); + let full_url = format!("{}{}", node_url.as_str().trim_end_matches('/'), path); + + let mut request_builder = self.http_client.request(method.clone(), &full_url).header("X-TYPESENSE-API-KEY", &self.config.api_key); + + if let Some(body) = body { + request_builder = request_builder.json(body); + } + + if let Some(params) = query_params { + request_builder = request_builder.query(params); + } + + match request_builder.send().await { + Ok(response) => { + // If the request was successful, parse the response and return. + return Self::handle_response(response).await; + } + Err(e) => { + // This error is from the reqwest-middleware layer, likely a connection + // error or because all retries on this single node were exhausted. + // We'll log it and try the next node. + eprintln!("Request to node {} failed: {}. Trying next node.", node_url, e); + last_error = Some(Error::Middleware(e)); + } + } + } + // If all nodes have been tried and failed, return the last error. + Err(last_error.expect("No nodes were available to try")) + } + + /// Generic response handler adapted from the generated code. + /// This parses a success response or a typed error response. + async fn handle_response<U, E>(resp: reqwest::Response) -> Result<U, Error<E>> + where + U: for<'de> serde::Deserialize<'de>, + E: for<'de> serde::Deserialize<'de>, + { + let status = resp.status(); + let content = resp.text().await.map_err(Error::Reqwest)?; + + if status.is_success() { + serde_json::from_str(&content).map_err(Error::Serde) + } else { + let entity: Option<E> = serde_json::from_str(&content).ok(); + let error = typesense_codegen::apis::ResponseContent { status, content, entity }; + Err(Error::ResponseError(error)) + } + } +} diff --git a/typesense/src/lib.rs b/typesense/src/lib.rs index 286c959..b0a85fa 100644 --- a/typesense/src/lib.rs +++ b/typesense/src/lib.rs @@ -43,6 +43,7 @@ //! } //!
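To tie the new client module together, here is a minimal usage sketch of the types added above (`MultiNodeConfiguration`, `Client`, and the `Collections` namespace with its public `client` field). It assumes `tokio` as the async runtime and uses placeholder node URLs and a placeholder API key.

```rust
use std::time::Duration;

use reqwest::Url;
use reqwest_retry::policies::ExponentialBackoff;
use typesense::client::{Client, Collections, MultiNodeConfiguration};

#[tokio::main]
async fn main() {
    let config = MultiNodeConfiguration {
        nodes: vec![
            Url::parse("http://localhost:8108").unwrap(),
            Url::parse("http://localhost:8109").unwrap(),
        ],
        api_key: "xyz".to_string(),
        // Retry transient failures on a single node a few times before the
        // request executor rotates to the next node.
        retry_policy: ExponentialBackoff::builder().build_with_max_retries(3),
        connection_timeout: Duration::from_secs(5),
    };
    let client = Client::new(config);

    // The namespace struct borrows the client via its public `client` field.
    let collections = Collections { client: &client };
    match collections.get("products").await {
        Ok(collection) => println!("found collection: {}", collection.name),
        Err(e) => eprintln!("lookup failed: {e:?}"),
    }
}
```

The `retry_policy` only governs retries against a single node; when those are exhausted, `execute_request` moves on to the next node via the round-robin index, so node failover and per-node retries stay cleanly separated.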
``` +pub mod client; pub mod collection_schema; pub mod document; pub mod field; diff --git a/typesense_codegen/.openapi-generator-ignore b/typesense_codegen/.openapi-generator-ignore index 7484ee5..0161386 100644 --- a/typesense_codegen/.openapi-generator-ignore +++ b/typesense_codegen/.openapi-generator-ignore @@ -21,3 +21,5 @@ #docs/*.md # Then explicitly reverse the ignore rule for a single file: #!docs/README.md + +Cargo.toml \ No newline at end of file diff --git a/typesense_codegen/.openapi-generator/FILES b/typesense_codegen/.openapi-generator/FILES index 2e9cc88..6df3cf7 100644 --- a/typesense_codegen/.openapi-generator/FILES +++ b/typesense_codegen/.openapi-generator/FILES @@ -1,18 +1,23 @@ .gitignore -.openapi-generator-ignore .travis.yml -Cargo.toml README.md docs/AnalyticsApi.md +docs/AnalyticsEventCreateResponse.md +docs/AnalyticsEventCreateSchema.md +docs/AnalyticsRuleDeleteResponse.md docs/AnalyticsRuleParameters.md docs/AnalyticsRuleParametersDestination.md docs/AnalyticsRuleParametersSource.md +docs/AnalyticsRuleParametersSourceEventsInner.md docs/AnalyticsRuleSchema.md +docs/AnalyticsRuleUpsertSchema.md docs/AnalyticsRulesRetrieveSchema.md docs/ApiKey.md +docs/ApiKeyDeleteResponse.md docs/ApiKeySchema.md docs/ApiKeysResponse.md docs/ApiResponse.md +docs/ApiStatsResponse.md docs/CollectionAlias.md docs/CollectionAliasSchema.md docs/CollectionAliasesResponse.md @@ -20,13 +25,20 @@ docs/CollectionResponse.md docs/CollectionSchema.md docs/CollectionUpdateSchema.md docs/CollectionsApi.md +docs/ConversationModelCreateSchema.md +docs/ConversationModelSchema.md +docs/ConversationModelUpdateSchema.md +docs/ConversationsApi.md +docs/CurationApi.md docs/Debug200Response.md docs/DebugApi.md docs/DeleteDocuments200Response.md -docs/DeleteDocumentsDeleteDocumentsParametersParameter.md +docs/DeleteStopwordsSet200Response.md +docs/DirtyValues.md +docs/DocumentIndexParameters.md docs/DocumentsApi.md +docs/DropTokensMode.md docs/ErrorResponse.md -docs/ExportDocumentsExportDocumentsParametersParameter.md docs/FacetCounts.md docs/FacetCountsCountsInner.md docs/FacetCountsStats.md @@ -35,19 +47,28 @@ docs/FieldEmbed.md docs/FieldEmbedModelConfig.md docs/HealthApi.md docs/HealthStatus.md -docs/ImportDocumentsImportDocumentsParametersParameter.md +docs/IndexAction.md docs/KeysApi.md +docs/ListStemmingDictionaries200Response.md docs/MultiSearchCollectionParameters.md docs/MultiSearchParameters.md docs/MultiSearchResult.md +docs/MultiSearchResultItem.md docs/MultiSearchSearchesParameter.md docs/OperationsApi.md docs/OverrideApi.md -docs/PromoteApi.md +docs/PresetDeleteSchema.md +docs/PresetSchema.md +docs/PresetUpsertSchema.md +docs/PresetUpsertSchemaValue.md +docs/PresetsApi.md +docs/PresetsRetrieveSchema.md +docs/SchemaChangeStatus.md docs/ScopedKeyParameters.md docs/SearchGroupedHit.md docs/SearchHighlight.md docs/SearchOverride.md +docs/SearchOverrideDeleteResponse.md docs/SearchOverrideExclude.md docs/SearchOverrideInclude.md docs/SearchOverrideRule.md @@ -55,19 +76,34 @@ docs/SearchOverrideSchema.md docs/SearchOverridesResponse.md docs/SearchParameters.md docs/SearchResult.md +docs/SearchResultConversation.md docs/SearchResultHit.md +docs/SearchResultHitTextMatchInfo.md docs/SearchResultRequestParams.md +docs/SearchResultRequestParamsVoiceQuery.md docs/SearchSynonym.md +docs/SearchSynonymDeleteResponse.md docs/SearchSynonymSchema.md docs/SearchSynonymsResponse.md docs/SnapshotParameters.md +docs/StemmingApi.md +docs/StemmingDictionary.md +docs/StemmingDictionaryWordsInner.md 
+docs/StopwordsApi.md +docs/StopwordsSetRetrieveSchema.md +docs/StopwordsSetSchema.md +docs/StopwordsSetUpsertSchema.md +docs/StopwordsSetsRetrieveAllSchema.md docs/SuccessStatus.md +docs/SynonymsApi.md docs/UpdateDocuments200Response.md -docs/UpdateDocumentsUpdateDocumentsParametersParameter.md +docs/VoiceQueryModelCollectionConfig.md git_push.sh src/apis/analytics_api.rs src/apis/collections_api.rs src/apis/configuration.rs +src/apis/conversations_api.rs +src/apis/curation_api.rs src/apis/debug_api.rs src/apis/documents_api.rs src/apis/health_api.rs @@ -75,28 +111,43 @@ src/apis/keys_api.rs src/apis/mod.rs src/apis/operations_api.rs src/apis/override_api.rs -src/apis/promote_api.rs +src/apis/presets_api.rs +src/apis/stemming_api.rs +src/apis/stopwords_api.rs +src/apis/synonyms_api.rs src/lib.rs +src/models/analytics_event_create_response.rs +src/models/analytics_event_create_schema.rs +src/models/analytics_rule_delete_response.rs src/models/analytics_rule_parameters.rs src/models/analytics_rule_parameters_destination.rs src/models/analytics_rule_parameters_source.rs +src/models/analytics_rule_parameters_source_events_inner.rs src/models/analytics_rule_schema.rs +src/models/analytics_rule_upsert_schema.rs src/models/analytics_rules_retrieve_schema.rs src/models/api_key.rs +src/models/api_key_delete_response.rs src/models/api_key_schema.rs src/models/api_keys_response.rs src/models/api_response.rs +src/models/api_stats_response.rs src/models/collection_alias.rs src/models/collection_alias_schema.rs src/models/collection_aliases_response.rs src/models/collection_response.rs src/models/collection_schema.rs src/models/collection_update_schema.rs +src/models/conversation_model_create_schema.rs +src/models/conversation_model_schema.rs +src/models/conversation_model_update_schema.rs src/models/debug_200_response.rs src/models/delete_documents_200_response.rs -src/models/delete_documents_delete_documents_parameters_parameter.rs +src/models/delete_stopwords_set_200_response.rs +src/models/dirty_values.rs +src/models/document_index_parameters.rs +src/models/drop_tokens_mode.rs src/models/error_response.rs -src/models/export_documents_export_documents_parameters_parameter.rs src/models/facet_counts.rs src/models/facet_counts_counts_inner.rs src/models/facet_counts_stats.rs @@ -104,16 +155,25 @@ src/models/field.rs src/models/field_embed.rs src/models/field_embed_model_config.rs src/models/health_status.rs -src/models/import_documents_import_documents_parameters_parameter.rs +src/models/index_action.rs +src/models/list_stemming_dictionaries_200_response.rs src/models/mod.rs src/models/multi_search_collection_parameters.rs src/models/multi_search_parameters.rs src/models/multi_search_result.rs +src/models/multi_search_result_item.rs src/models/multi_search_searches_parameter.rs +src/models/preset_delete_schema.rs +src/models/preset_schema.rs +src/models/preset_upsert_schema.rs +src/models/preset_upsert_schema_value.rs +src/models/presets_retrieve_schema.rs +src/models/schema_change_status.rs src/models/scoped_key_parameters.rs src/models/search_grouped_hit.rs src/models/search_highlight.rs src/models/search_override.rs +src/models/search_override_delete_response.rs src/models/search_override_exclude.rs src/models/search_override_include.rs src/models/search_override_rule.rs @@ -121,12 +181,22 @@ src/models/search_override_schema.rs src/models/search_overrides_response.rs src/models/search_parameters.rs src/models/search_result.rs +src/models/search_result_conversation.rs 
src/models/search_result_hit.rs +src/models/search_result_hit_text_match_info.rs src/models/search_result_request_params.rs +src/models/search_result_request_params_voice_query.rs src/models/search_synonym.rs +src/models/search_synonym_delete_response.rs src/models/search_synonym_schema.rs src/models/search_synonyms_response.rs src/models/snapshot_parameters.rs +src/models/stemming_dictionary.rs +src/models/stemming_dictionary_words_inner.rs +src/models/stopwords_set_retrieve_schema.rs +src/models/stopwords_set_schema.rs +src/models/stopwords_set_upsert_schema.rs +src/models/stopwords_sets_retrieve_all_schema.rs src/models/success_status.rs src/models/update_documents_200_response.rs -src/models/update_documents_update_documents_parameters_parameter.rs +src/models/voice_query_model_collection_config.rs diff --git a/typesense_codegen/.openapi-generator/VERSION b/typesense_codegen/.openapi-generator/VERSION index c9e125b..e465da4 100644 --- a/typesense_codegen/.openapi-generator/VERSION +++ b/typesense_codegen/.openapi-generator/VERSION @@ -1 +1 @@ -7.4.0-SNAPSHOT +7.14.0 diff --git a/typesense_codegen/README.md b/typesense_codegen/README.md index c634c1d..ddffbb8 100644 --- a/typesense_codegen/README.md +++ b/typesense_codegen/README.md @@ -7,8 +7,9 @@ An open source search engine for building delightful search experiences. This API client was generated by the [OpenAPI Generator](https://openapi-generator.tech) project. By using the [openapi-spec](https://openapis.org) from a remote server, you can easily generate an API client. -- API version: 0.25.0 -- Package version: 0.25.0 +- API version: 28.0 +- Package version: 28.0 +- Generator version: 7.14.0 - Build package: `org.openapitools.codegen.languages.RustClientCodegen` ## Installation @@ -25,10 +26,12 @@ All URIs are relative to *http://localhost* Class | Method | HTTP request | Description ------------ | ------------- | ------------- | ------------- +*AnalyticsApi* | [**create_analytics_event**](docs/AnalyticsApi.md#create_analytics_event) | **POST** /analytics/events | Create an analytics event *AnalyticsApi* | [**create_analytics_rule**](docs/AnalyticsApi.md#create_analytics_rule) | **POST** /analytics/rules | Creates an analytics rule *AnalyticsApi* | [**delete_analytics_rule**](docs/AnalyticsApi.md#delete_analytics_rule) | **DELETE** /analytics/rules/{ruleName} | Delete an analytics rule *AnalyticsApi* | [**retrieve_analytics_rule**](docs/AnalyticsApi.md#retrieve_analytics_rule) | **GET** /analytics/rules/{ruleName} | Retrieves an analytics rule *AnalyticsApi* | [**retrieve_analytics_rules**](docs/AnalyticsApi.md#retrieve_analytics_rules) | **GET** /analytics/rules | Retrieves all analytics rules +*AnalyticsApi* | [**upsert_analytics_rule**](docs/AnalyticsApi.md#upsert_analytics_rule) | **PUT** /analytics/rules/{ruleName} | Upserts an analytics rule *CollectionsApi* | [**create_collection**](docs/CollectionsApi.md#create_collection) | **POST** /collections | Create a new collection *CollectionsApi* | [**delete_alias**](docs/CollectionsApi.md#delete_alias) | **DELETE** /aliases/{aliasName} | Delete an alias *CollectionsApi* | [**delete_collection**](docs/CollectionsApi.md#delete_collection) | **DELETE** /collections/{collectionName} | Delete a collection @@ -38,17 +41,22 @@ Class | Method | HTTP request | Description *CollectionsApi* | [**get_collections**](docs/CollectionsApi.md#get_collections) | **GET** /collections | List all collections *CollectionsApi* | [**update_collection**](docs/CollectionsApi.md#update_collection) | 
**PATCH** /collections/{collectionName} | Update a collection *CollectionsApi* | [**upsert_alias**](docs/CollectionsApi.md#upsert_alias) | **PUT** /aliases/{aliasName} | Create or update a collection alias +*ConversationsApi* | [**create_conversation_model**](docs/ConversationsApi.md#create_conversation_model) | **POST** /conversations/models | +*ConversationsApi* | [**delete_conversation_model**](docs/ConversationsApi.md#delete_conversation_model) | **DELETE** /conversations/models/{modelId} | Delete a conversation model +*ConversationsApi* | [**retrieve_all_conversation_models**](docs/ConversationsApi.md#retrieve_all_conversation_models) | **GET** /conversations/models | List all conversation models +*ConversationsApi* | [**retrieve_conversation_model**](docs/ConversationsApi.md#retrieve_conversation_model) | **GET** /conversations/models/{modelId} | Retrieve a conversation model +*ConversationsApi* | [**update_conversation_model**](docs/ConversationsApi.md#update_conversation_model) | **PUT** /conversations/models/{modelId} | Update a conversation model +*CurationApi* | [**delete_search_override**](docs/CurationApi.md#delete_search_override) | **DELETE** /collections/{collectionName}/overrides/{overrideId} | Delete an override associated with a collection +*CurationApi* | [**get_search_overrides**](docs/CurationApi.md#get_search_overrides) | **GET** /collections/{collectionName}/overrides | List all collection overrides +*CurationApi* | [**upsert_search_override**](docs/CurationApi.md#upsert_search_override) | **PUT** /collections/{collectionName}/overrides/{overrideId} | Create or update an override to promote certain documents over others *DebugApi* | [**debug**](docs/DebugApi.md#debug) | **GET** /debug | Print debugging information *DocumentsApi* | [**delete_document**](docs/DocumentsApi.md#delete_document) | **DELETE** /collections/{collectionName}/documents/{documentId} | Delete a document *DocumentsApi* | [**delete_documents**](docs/DocumentsApi.md#delete_documents) | **DELETE** /collections/{collectionName}/documents | Delete a bunch of documents *DocumentsApi* | [**delete_search_override**](docs/DocumentsApi.md#delete_search_override) | **DELETE** /collections/{collectionName}/overrides/{overrideId} | Delete an override associated with a collection -*DocumentsApi* | [**delete_search_synonym**](docs/DocumentsApi.md#delete_search_synonym) | **DELETE** /collections/{collectionName}/synonyms/{synonymId} | Delete a synonym associated with a collection *DocumentsApi* | [**export_documents**](docs/DocumentsApi.md#export_documents) | **GET** /collections/{collectionName}/documents/export | Export all documents in a collection *DocumentsApi* | [**get_document**](docs/DocumentsApi.md#get_document) | **GET** /collections/{collectionName}/documents/{documentId} | Retreive a document *DocumentsApi* | [**get_search_override**](docs/DocumentsApi.md#get_search_override) | **GET** /collections/{collectionName}/overrides/{overrideId} | Retrieve a single search override *DocumentsApi* | [**get_search_overrides**](docs/DocumentsApi.md#get_search_overrides) | **GET** /collections/{collectionName}/overrides | List all collection overrides -*DocumentsApi* | [**get_search_synonym**](docs/DocumentsApi.md#get_search_synonym) | **GET** /collections/{collectionName}/synonyms/{synonymId} | Retrieve a single search synonym -*DocumentsApi* | [**get_search_synonyms**](docs/DocumentsApi.md#get_search_synonyms) | **GET** /collections/{collectionName}/synonyms | List all collection synonyms *DocumentsApi* | 
[**import_documents**](docs/DocumentsApi.md#import_documents) | **POST** /collections/{collectionName}/documents/import | Import documents into a collection *DocumentsApi* | [**index_document**](docs/DocumentsApi.md#index_document) | **POST** /collections/{collectionName}/documents | Index a document *DocumentsApi* | [**multi_search**](docs/DocumentsApi.md#multi_search) | **POST** /multi_search | send multiple search requests in a single HTTP request @@ -56,42 +64,68 @@ Class | Method | HTTP request | Description *DocumentsApi* | [**update_document**](docs/DocumentsApi.md#update_document) | **PATCH** /collections/{collectionName}/documents/{documentId} | Update a document *DocumentsApi* | [**update_documents**](docs/DocumentsApi.md#update_documents) | **PATCH** /collections/{collectionName}/documents | Update documents with conditional query *DocumentsApi* | [**upsert_search_override**](docs/DocumentsApi.md#upsert_search_override) | **PUT** /collections/{collectionName}/overrides/{overrideId} | Create or update an override to promote certain documents over others -*DocumentsApi* | [**upsert_search_synonym**](docs/DocumentsApi.md#upsert_search_synonym) | **PUT** /collections/{collectionName}/synonyms/{synonymId} | Create or update a synonym *HealthApi* | [**health**](docs/HealthApi.md#health) | **GET** /health | Checks if Typesense server is ready to accept requests. *KeysApi* | [**create_key**](docs/KeysApi.md#create_key) | **POST** /keys | Create an API Key *KeysApi* | [**delete_key**](docs/KeysApi.md#delete_key) | **DELETE** /keys/{keyId} | Delete an API key given its ID. *KeysApi* | [**get_key**](docs/KeysApi.md#get_key) | **GET** /keys/{keyId} | Retrieve (metadata about) a key *KeysApi* | [**get_keys**](docs/KeysApi.md#get_keys) | **GET** /keys | Retrieve (metadata about) all keys. +*OperationsApi* | [**get_schema_changes**](docs/OperationsApi.md#get_schema_changes) | **GET** /operations/schema_changes | Get the status of in-progress schema change operations +*OperationsApi* | [**retrieve_api_stats**](docs/OperationsApi.md#retrieve_api_stats) | **GET** /stats.json | Get stats about API endpoints. +*OperationsApi* | [**retrieve_metrics**](docs/OperationsApi.md#retrieve_metrics) | **GET** /metrics.json | Get current RAM, CPU, Disk & Network usage metrics. *OperationsApi* | [**take_snapshot**](docs/OperationsApi.md#take_snapshot) | **POST** /operations/snapshot | Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. *OperationsApi* | [**vote**](docs/OperationsApi.md#vote) | **POST** /operations/vote | Triggers a follower node to initiate the raft voting process, which triggers leader re-election. 
*OverrideApi* | [**get_search_override**](docs/OverrideApi.md#get_search_override) | **GET** /collections/{collectionName}/overrides/{overrideId} | Retrieve a single search override -*PromoteApi* | [**delete_search_override**](docs/PromoteApi.md#delete_search_override) | **DELETE** /collections/{collectionName}/overrides/{overrideId} | Delete an override associated with a collection -*PromoteApi* | [**get_search_overrides**](docs/PromoteApi.md#get_search_overrides) | **GET** /collections/{collectionName}/overrides | List all collection overrides -*PromoteApi* | [**upsert_search_override**](docs/PromoteApi.md#upsert_search_override) | **PUT** /collections/{collectionName}/overrides/{overrideId} | Create or update an override to promote certain documents over others +*PresetsApi* | [**delete_preset**](docs/PresetsApi.md#delete_preset) | **DELETE** /presets/{presetId} | Delete a preset. +*PresetsApi* | [**retrieve_all_presets**](docs/PresetsApi.md#retrieve_all_presets) | **GET** /presets | Retrieves all presets. +*PresetsApi* | [**retrieve_preset**](docs/PresetsApi.md#retrieve_preset) | **GET** /presets/{presetId} | Retrieves a preset. +*PresetsApi* | [**upsert_preset**](docs/PresetsApi.md#upsert_preset) | **PUT** /presets/{presetId} | Upserts a preset. +*StemmingApi* | [**get_stemming_dictionary**](docs/StemmingApi.md#get_stemming_dictionary) | **GET** /stemming/dictionaries/{dictionaryId} | Retrieve a stemming dictionary +*StemmingApi* | [**import_stemming_dictionary**](docs/StemmingApi.md#import_stemming_dictionary) | **POST** /stemming/dictionaries/import | Import a stemming dictionary +*StemmingApi* | [**list_stemming_dictionaries**](docs/StemmingApi.md#list_stemming_dictionaries) | **GET** /stemming/dictionaries | List all stemming dictionaries +*StopwordsApi* | [**delete_stopwords_set**](docs/StopwordsApi.md#delete_stopwords_set) | **DELETE** /stopwords/{setId} | Delete a stopwords set. +*StopwordsApi* | [**retrieve_stopwords_set**](docs/StopwordsApi.md#retrieve_stopwords_set) | **GET** /stopwords/{setId} | Retrieves a stopwords set. +*StopwordsApi* | [**retrieve_stopwords_sets**](docs/StopwordsApi.md#retrieve_stopwords_sets) | **GET** /stopwords | Retrieves all stopwords sets. +*StopwordsApi* | [**upsert_stopwords_set**](docs/StopwordsApi.md#upsert_stopwords_set) | **PUT** /stopwords/{setId} | Upserts a stopwords set. 
+*SynonymsApi* | [**delete_search_synonym**](docs/SynonymsApi.md#delete_search_synonym) | **DELETE** /collections/{collectionName}/synonyms/{synonymId} | Delete a synonym associated with a collection +*SynonymsApi* | [**get_search_synonym**](docs/SynonymsApi.md#get_search_synonym) | **GET** /collections/{collectionName}/synonyms/{synonymId} | Retrieve a single search synonym +*SynonymsApi* | [**get_search_synonyms**](docs/SynonymsApi.md#get_search_synonyms) | **GET** /collections/{collectionName}/synonyms | List all collection synonyms +*SynonymsApi* | [**upsert_search_synonym**](docs/SynonymsApi.md#upsert_search_synonym) | **PUT** /collections/{collectionName}/synonyms/{synonymId} | Create or update a synonym ## Documentation For Models + - [AnalyticsEventCreateResponse](docs/AnalyticsEventCreateResponse.md) + - [AnalyticsEventCreateSchema](docs/AnalyticsEventCreateSchema.md) + - [AnalyticsRuleDeleteResponse](docs/AnalyticsRuleDeleteResponse.md) - [AnalyticsRuleParameters](docs/AnalyticsRuleParameters.md) - [AnalyticsRuleParametersDestination](docs/AnalyticsRuleParametersDestination.md) - [AnalyticsRuleParametersSource](docs/AnalyticsRuleParametersSource.md) + - [AnalyticsRuleParametersSourceEventsInner](docs/AnalyticsRuleParametersSourceEventsInner.md) - [AnalyticsRuleSchema](docs/AnalyticsRuleSchema.md) + - [AnalyticsRuleUpsertSchema](docs/AnalyticsRuleUpsertSchema.md) - [AnalyticsRulesRetrieveSchema](docs/AnalyticsRulesRetrieveSchema.md) - [ApiKey](docs/ApiKey.md) + - [ApiKeyDeleteResponse](docs/ApiKeyDeleteResponse.md) - [ApiKeySchema](docs/ApiKeySchema.md) - [ApiKeysResponse](docs/ApiKeysResponse.md) - [ApiResponse](docs/ApiResponse.md) + - [ApiStatsResponse](docs/ApiStatsResponse.md) - [CollectionAlias](docs/CollectionAlias.md) - [CollectionAliasSchema](docs/CollectionAliasSchema.md) - [CollectionAliasesResponse](docs/CollectionAliasesResponse.md) - [CollectionResponse](docs/CollectionResponse.md) - [CollectionSchema](docs/CollectionSchema.md) - [CollectionUpdateSchema](docs/CollectionUpdateSchema.md) + - [ConversationModelCreateSchema](docs/ConversationModelCreateSchema.md) + - [ConversationModelSchema](docs/ConversationModelSchema.md) + - [ConversationModelUpdateSchema](docs/ConversationModelUpdateSchema.md) - [Debug200Response](docs/Debug200Response.md) - [DeleteDocuments200Response](docs/DeleteDocuments200Response.md) - - [DeleteDocumentsDeleteDocumentsParametersParameter](docs/DeleteDocumentsDeleteDocumentsParametersParameter.md) + - [DeleteStopwordsSet200Response](docs/DeleteStopwordsSet200Response.md) + - [DirtyValues](docs/DirtyValues.md) + - [DocumentIndexParameters](docs/DocumentIndexParameters.md) + - [DropTokensMode](docs/DropTokensMode.md) - [ErrorResponse](docs/ErrorResponse.md) - - [ExportDocumentsExportDocumentsParametersParameter](docs/ExportDocumentsExportDocumentsParametersParameter.md) - [FacetCounts](docs/FacetCounts.md) - [FacetCountsCountsInner](docs/FacetCountsCountsInner.md) - [FacetCountsStats](docs/FacetCountsStats.md) @@ -99,15 +133,24 @@ Class | Method | HTTP request | Description - [FieldEmbed](docs/FieldEmbed.md) - [FieldEmbedModelConfig](docs/FieldEmbedModelConfig.md) - [HealthStatus](docs/HealthStatus.md) - - [ImportDocumentsImportDocumentsParametersParameter](docs/ImportDocumentsImportDocumentsParametersParameter.md) + - [IndexAction](docs/IndexAction.md) + - [ListStemmingDictionaries200Response](docs/ListStemmingDictionaries200Response.md) - [MultiSearchCollectionParameters](docs/MultiSearchCollectionParameters.md) - 
[MultiSearchParameters](docs/MultiSearchParameters.md) - [MultiSearchResult](docs/MultiSearchResult.md) + - [MultiSearchResultItem](docs/MultiSearchResultItem.md) - [MultiSearchSearchesParameter](docs/MultiSearchSearchesParameter.md) + - [PresetDeleteSchema](docs/PresetDeleteSchema.md) + - [PresetSchema](docs/PresetSchema.md) + - [PresetUpsertSchema](docs/PresetUpsertSchema.md) + - [PresetUpsertSchemaValue](docs/PresetUpsertSchemaValue.md) + - [PresetsRetrieveSchema](docs/PresetsRetrieveSchema.md) + - [SchemaChangeStatus](docs/SchemaChangeStatus.md) - [ScopedKeyParameters](docs/ScopedKeyParameters.md) - [SearchGroupedHit](docs/SearchGroupedHit.md) - [SearchHighlight](docs/SearchHighlight.md) - [SearchOverride](docs/SearchOverride.md) + - [SearchOverrideDeleteResponse](docs/SearchOverrideDeleteResponse.md) - [SearchOverrideExclude](docs/SearchOverrideExclude.md) - [SearchOverrideInclude](docs/SearchOverrideInclude.md) - [SearchOverrideRule](docs/SearchOverrideRule.md) @@ -115,15 +158,25 @@ Class | Method | HTTP request | Description - [SearchOverridesResponse](docs/SearchOverridesResponse.md) - [SearchParameters](docs/SearchParameters.md) - [SearchResult](docs/SearchResult.md) + - [SearchResultConversation](docs/SearchResultConversation.md) - [SearchResultHit](docs/SearchResultHit.md) + - [SearchResultHitTextMatchInfo](docs/SearchResultHitTextMatchInfo.md) - [SearchResultRequestParams](docs/SearchResultRequestParams.md) + - [SearchResultRequestParamsVoiceQuery](docs/SearchResultRequestParamsVoiceQuery.md) - [SearchSynonym](docs/SearchSynonym.md) + - [SearchSynonymDeleteResponse](docs/SearchSynonymDeleteResponse.md) - [SearchSynonymSchema](docs/SearchSynonymSchema.md) - [SearchSynonymsResponse](docs/SearchSynonymsResponse.md) - [SnapshotParameters](docs/SnapshotParameters.md) + - [StemmingDictionary](docs/StemmingDictionary.md) + - [StemmingDictionaryWordsInner](docs/StemmingDictionaryWordsInner.md) + - [StopwordsSetRetrieveSchema](docs/StopwordsSetRetrieveSchema.md) + - [StopwordsSetSchema](docs/StopwordsSetSchema.md) + - [StopwordsSetUpsertSchema](docs/StopwordsSetUpsertSchema.md) + - [StopwordsSetsRetrieveAllSchema](docs/StopwordsSetsRetrieveAllSchema.md) - [SuccessStatus](docs/SuccessStatus.md) - [UpdateDocuments200Response](docs/UpdateDocuments200Response.md) - - [UpdateDocumentsUpdateDocumentsParametersParameter](docs/UpdateDocumentsUpdateDocumentsParametersParameter.md) + - [VoiceQueryModelCollectionConfig](docs/VoiceQueryModelCollectionConfig.md) To get access to the crate's generated documentation, use: diff --git a/typesense_codegen/docs/AnalyticsApi.md b/typesense_codegen/docs/AnalyticsApi.md index 7b6b7d0..f9ba22c 100644 --- a/typesense_codegen/docs/AnalyticsApi.md +++ b/typesense_codegen/docs/AnalyticsApi.md @@ -4,16 +4,48 @@ All URIs are relative to *http://localhost* Method | HTTP request | Description ------------- | ------------- | ------------- +[**create_analytics_event**](AnalyticsApi.md#create_analytics_event) | **POST** /analytics/events | Create an analytics event [**create_analytics_rule**](AnalyticsApi.md#create_analytics_rule) | **POST** /analytics/rules | Creates an analytics rule [**delete_analytics_rule**](AnalyticsApi.md#delete_analytics_rule) | **DELETE** /analytics/rules/{ruleName} | Delete an analytics rule [**retrieve_analytics_rule**](AnalyticsApi.md#retrieve_analytics_rule) | **GET** /analytics/rules/{ruleName} | Retrieves an analytics rule [**retrieve_analytics_rules**](AnalyticsApi.md#retrieve_analytics_rules) | **GET** /analytics/rules | Retrieves all 
analytics rules +[**upsert_analytics_rule**](AnalyticsApi.md#upsert_analytics_rule) | **PUT** /analytics/rules/{ruleName} | Upserts an analytics rule +## create_analytics_event + +> models::AnalyticsEventCreateResponse create_analytics_event(analytics_event_create_schema) +Create an analytics event + +Sending events for analytics e.g rank search results based on popularity. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**analytics_event_create_schema** | [**AnalyticsEventCreateSchema**](AnalyticsEventCreateSchema.md) | The Analytics event to be created | [required] | + +### Return type + +[**models::AnalyticsEventCreateResponse**](AnalyticsEventCreateResponse.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + ## create_analytics_rule -> crate::models::AnalyticsRuleSchema create_analytics_rule(analytics_rule_schema) +> models::AnalyticsRuleSchema create_analytics_rule(analytics_rule_schema) Creates an analytics rule When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. @@ -27,7 +59,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::AnalyticsRuleSchema**](AnalyticsRuleSchema.md) +[**models::AnalyticsRuleSchema**](AnalyticsRuleSchema.md) ### Authorization @@ -43,7 +75,7 @@ Name | Type | Description | Required | Notes ## delete_analytics_rule -> crate::models::AnalyticsRuleSchema delete_analytics_rule(rule_name) +> models::AnalyticsRuleDeleteResponse delete_analytics_rule(rule_name) Delete an analytics rule Permanently deletes an analytics rule, given it's name @@ -57,7 +89,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::AnalyticsRuleSchema**](AnalyticsRuleSchema.md) +[**models::AnalyticsRuleDeleteResponse**](AnalyticsRuleDeleteResponse.md) ### Authorization @@ -73,7 +105,7 @@ Name | Type | Description | Required | Notes ## retrieve_analytics_rule -> crate::models::AnalyticsRuleSchema retrieve_analytics_rule(rule_name) +> models::AnalyticsRuleSchema retrieve_analytics_rule(rule_name) Retrieves an analytics rule Retrieve the details of an analytics rule, given it's name @@ -87,7 +119,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::AnalyticsRuleSchema**](AnalyticsRuleSchema.md) +[**models::AnalyticsRuleSchema**](AnalyticsRuleSchema.md) ### Authorization @@ -103,7 +135,7 @@ Name | Type | Description | Required | Notes ## retrieve_analytics_rules -> crate::models::AnalyticsRulesRetrieveSchema retrieve_analytics_rules() +> models::AnalyticsRulesRetrieveSchema retrieve_analytics_rules() Retrieves all analytics rules Retrieve the details of all analytics rules @@ -114,7 +146,7 @@ This endpoint does not need any parameter. ### Return type -[**crate::models::AnalyticsRulesRetrieveSchema**](AnalyticsRulesRetrieveSchema.md) +[**models::AnalyticsRulesRetrieveSchema**](AnalyticsRulesRetrieveSchema.md) ### Authorization @@ -127,3 +159,34 @@ This endpoint does not need any parameter. 
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +## upsert_analytics_rule + +> models::AnalyticsRuleSchema upsert_analytics_rule(rule_name, analytics_rule_upsert_schema) +Upserts an analytics rule + +Upserts an analytics rule with the given name. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**rule_name** | **String** | The name of the analytics rule to upsert | [required] | +**analytics_rule_upsert_schema** | [**AnalyticsRuleUpsertSchema**](AnalyticsRuleUpsertSchema.md) | The Analytics rule to be upserted | [required] | + +### Return type + +[**models::AnalyticsRuleSchema**](AnalyticsRuleSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/typesense_codegen/docs/UpdateDocumentsUpdateDocumentsParametersParameter.md b/typesense_codegen/docs/AnalyticsEventCreateResponse.md similarity index 72% rename from typesense_codegen/docs/UpdateDocumentsUpdateDocumentsParametersParameter.md rename to typesense_codegen/docs/AnalyticsEventCreateResponse.md index 0ba0554..321be7b 100644 --- a/typesense_codegen/docs/UpdateDocumentsUpdateDocumentsParametersParameter.md +++ b/typesense_codegen/docs/AnalyticsEventCreateResponse.md @@ -1,10 +1,10 @@ -# UpdateDocumentsUpdateDocumentsParametersParameter +# AnalyticsEventCreateResponse ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**filter_by** | Option<**String**> | | [optional] +**ok** | **bool** | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ImportDocumentsImportDocumentsParametersParameter.md b/typesense_codegen/docs/AnalyticsEventCreateSchema.md similarity index 50% rename from typesense_codegen/docs/ImportDocumentsImportDocumentsParametersParameter.md rename to typesense_codegen/docs/AnalyticsEventCreateSchema.md index aea6558..ddb569b 100644 --- a/typesense_codegen/docs/ImportDocumentsImportDocumentsParametersParameter.md +++ b/typesense_codegen/docs/AnalyticsEventCreateSchema.md @@ -1,13 +1,12 @@ -# ImportDocumentsImportDocumentsParametersParameter +# AnalyticsEventCreateSchema ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**action** | Option<**String**> | | [optional] -**batch_size** | Option<**i32**> | | [optional] -**dirty_values** | Option<**String**> | | [optional] -**remote_embedding_batch_size** | Option<**i32**> | | [optional] +**data** | [**serde_json::Value**](.md) | | +**name** | **String** | | +**r#type** | **String** | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/AnalyticsRuleDeleteResponse.md b/typesense_codegen/docs/AnalyticsRuleDeleteResponse.md new file mode 100644 index 0000000..bd7512f --- /dev/null +++ 
b/typesense_codegen/docs/AnalyticsRuleDeleteResponse.md @@ -0,0 +1,11 @@ +# AnalyticsRuleDeleteResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **String** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/AnalyticsRuleParameters.md b/typesense_codegen/docs/AnalyticsRuleParameters.md index 31b4ac4..48d8f28 100644 --- a/typesense_codegen/docs/AnalyticsRuleParameters.md +++ b/typesense_codegen/docs/AnalyticsRuleParameters.md @@ -4,9 +4,10 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**source** | [**crate::models::AnalyticsRuleParametersSource**](AnalyticsRuleParameters_source.md) | | -**destination** | [**crate::models::AnalyticsRuleParametersDestination**](AnalyticsRuleParameters_destination.md) | | -**limit** | **i32** | | +**destination** | [**models::AnalyticsRuleParametersDestination**](AnalyticsRuleParametersDestination.md) | | +**expand_query** | Option<**bool**> | | [optional] +**limit** | Option<**i32**> | | [optional] +**source** | [**models::AnalyticsRuleParametersSource**](AnalyticsRuleParametersSource.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/AnalyticsRuleParametersDestination.md b/typesense_codegen/docs/AnalyticsRuleParametersDestination.md index 6bf99e5..e5efc9b 100644 --- a/typesense_codegen/docs/AnalyticsRuleParametersDestination.md +++ b/typesense_codegen/docs/AnalyticsRuleParametersDestination.md @@ -4,7 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**collection** | Option<**String**> | | [optional] +**collection** | **String** | | +**counter_field** | Option<**String**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/AnalyticsRuleParametersSource.md b/typesense_codegen/docs/AnalyticsRuleParametersSource.md index b6a042b..05ab04d 100644 --- a/typesense_codegen/docs/AnalyticsRuleParametersSource.md +++ b/typesense_codegen/docs/AnalyticsRuleParametersSource.md @@ -4,7 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**collections** | Option<**Vec**> | | [optional] +**collections** | **Vec** | | +**events** | Option<[**Vec**](AnalyticsRuleParametersSource_events_inner.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/AnalyticsRuleParametersSourceEventsInner.md b/typesense_codegen/docs/AnalyticsRuleParametersSourceEventsInner.md new file mode 100644 index 0000000..0abc726 --- /dev/null +++ b/typesense_codegen/docs/AnalyticsRuleParametersSourceEventsInner.md @@ -0,0 +1,13 @@ +# AnalyticsRuleParametersSourceEventsInner + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **String** | | +**r#type** | **String** | | +**weight** | **f32** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to 
API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/AnalyticsRuleSchema.md b/typesense_codegen/docs/AnalyticsRuleSchema.md index 359d489..850d498 100644 --- a/typesense_codegen/docs/AnalyticsRuleSchema.md +++ b/typesense_codegen/docs/AnalyticsRuleSchema.md @@ -4,9 +4,9 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**name** | **String** | | +**params** | [**models::AnalyticsRuleParameters**](AnalyticsRuleParameters.md) | | **r#type** | **String** | | -**params** | [**crate::models::AnalyticsRuleParameters**](AnalyticsRuleParameters.md) | | +**name** | **String** | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/AnalyticsRuleUpsertSchema.md b/typesense_codegen/docs/AnalyticsRuleUpsertSchema.md new file mode 100644 index 0000000..83348b2 --- /dev/null +++ b/typesense_codegen/docs/AnalyticsRuleUpsertSchema.md @@ -0,0 +1,12 @@ +# AnalyticsRuleUpsertSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**params** | [**models::AnalyticsRuleParameters**](AnalyticsRuleParameters.md) | | +**r#type** | **String** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/AnalyticsRulesRetrieveSchema.md b/typesense_codegen/docs/AnalyticsRulesRetrieveSchema.md index f20cf77..c310473 100644 --- a/typesense_codegen/docs/AnalyticsRulesRetrieveSchema.md +++ b/typesense_codegen/docs/AnalyticsRulesRetrieveSchema.md @@ -4,7 +4,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**rules** | Option<[**Vec**](AnalyticsRuleSchema.md)> | | [optional] +**rules** | Option<[**Vec**](AnalyticsRuleSchema.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ApiKey.md b/typesense_codegen/docs/ApiKey.md index 94bb502..73253db 100644 --- a/typesense_codegen/docs/ApiKey.md +++ b/typesense_codegen/docs/ApiKey.md @@ -4,11 +4,11 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**value** | Option<**String**> | | [optional] -**description** | **String** | | **actions** | **Vec** | | **collections** | **Vec** | | +**description** | **String** | | **expires_at** | Option<**i64**> | | [optional] +**value** | Option<**String**> | | [optional] **id** | Option<**i64**> | | [optional][readonly] **value_prefix** | Option<**String**> | | [optional][readonly] diff --git a/typesense_codegen/docs/ApiKeyDeleteResponse.md b/typesense_codegen/docs/ApiKeyDeleteResponse.md new file mode 100644 index 0000000..8c38f9e --- /dev/null +++ b/typesense_codegen/docs/ApiKeyDeleteResponse.md @@ -0,0 +1,11 @@ +# ApiKeyDeleteResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **i64** | The id of the API key that was deleted | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git 
a/typesense_codegen/docs/ApiKeySchema.md b/typesense_codegen/docs/ApiKeySchema.md index 761d240..0d39285 100644 --- a/typesense_codegen/docs/ApiKeySchema.md +++ b/typesense_codegen/docs/ApiKeySchema.md @@ -4,11 +4,11 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**value** | Option<**String**> | | [optional] -**description** | **String** | | **actions** | **Vec** | | **collections** | **Vec** | | +**description** | **String** | | **expires_at** | Option<**i64**> | | [optional] +**value** | Option<**String**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ApiKeysResponse.md b/typesense_codegen/docs/ApiKeysResponse.md index 1eac36f..c3e6c3d 100644 --- a/typesense_codegen/docs/ApiKeysResponse.md +++ b/typesense_codegen/docs/ApiKeysResponse.md @@ -4,7 +4,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**keys** | [**Vec**](ApiKey.md) | | +**keys** | [**Vec**](ApiKey.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ApiStatsResponse.md b/typesense_codegen/docs/ApiStatsResponse.md new file mode 100644 index 0000000..c2569e7 --- /dev/null +++ b/typesense_codegen/docs/ApiStatsResponse.md @@ -0,0 +1,23 @@ +# ApiStatsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**delete_latency_ms** | Option<**f64**> | | [optional] +**delete_requests_per_second** | Option<**f64**> | | [optional] +**import_latency_ms** | Option<**f64**> | | [optional] +**import_requests_per_second** | Option<**f64**> | | [optional] +**latency_ms** | Option<[**serde_json::Value**](.md)> | | [optional] +**overloaded_requests_per_second** | Option<**f64**> | | [optional] +**pending_write_batches** | Option<**f64**> | | [optional] +**requests_per_second** | Option<[**serde_json::Value**](.md)> | | [optional] +**search_latency_ms** | Option<**f64**> | | [optional] +**search_requests_per_second** | Option<**f64**> | | [optional] +**total_requests_per_second** | Option<**f64**> | | [optional] +**write_latency_ms** | Option<**f64**> | | [optional] +**write_requests_per_second** | Option<**f64**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/CollectionAlias.md b/typesense_codegen/docs/CollectionAlias.md index 5831360..87306df 100644 --- a/typesense_codegen/docs/CollectionAlias.md +++ b/typesense_codegen/docs/CollectionAlias.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**name** | **String** | Name of the collection alias | [readonly] **collection_name** | **String** | Name of the collection the alias mapped to | +**name** | **String** | Name of the collection alias | [readonly] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/CollectionAliasesResponse.md b/typesense_codegen/docs/CollectionAliasesResponse.md index b8a0399..285708c 100644 --- 
a/typesense_codegen/docs/CollectionAliasesResponse.md +++ b/typesense_codegen/docs/CollectionAliasesResponse.md @@ -4,7 +4,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**aliases** | [**Vec**](CollectionAlias.md) | | +**aliases** | [**Vec**](CollectionAlias.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/CollectionResponse.md b/typesense_codegen/docs/CollectionResponse.md index aeb74a4..47b703b 100644 --- a/typesense_codegen/docs/CollectionResponse.md +++ b/typesense_codegen/docs/CollectionResponse.md @@ -4,14 +4,15 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**name** | **String** | Name of the collection | -**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | **default_sorting_field** | Option<**String**> | The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. | [optional][default to ] -**token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. | [optional][default to []] **enable_nested_fields** | Option<**bool**> | Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. | [optional][default to false] +**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | +**name** | **String** | Name of the collection | **symbols_to_index** | Option<**Vec**> | List of symbols or special characters to be indexed. | [optional][default to []] -**num_documents** | **i64** | Number of documents in the collection | [readonly] +**token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. | [optional][default to []] +**voice_query_model** | Option<[**models::VoiceQueryModelCollectionConfig**](VoiceQueryModelCollectionConfig.md)> | | [optional] **created_at** | **i64** | Timestamp of when the collection was created (Unix epoch in seconds) | [readonly] +**num_documents** | **i64** | Number of documents in the collection | [readonly] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/CollectionSchema.md b/typesense_codegen/docs/CollectionSchema.md index 1f78262..9b7ae4a 100644 --- a/typesense_codegen/docs/CollectionSchema.md +++ b/typesense_codegen/docs/CollectionSchema.md @@ -4,12 +4,13 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**name** | **String** | Name of the collection | -**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | **default_sorting_field** | Option<**String**> | The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. 
| [optional][default to ] -**token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. | [optional][default to []] **enable_nested_fields** | Option<**bool**> | Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. | [optional][default to false] +**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | +**name** | **String** | Name of the collection | **symbols_to_index** | Option<**Vec**> | List of symbols or special characters to be indexed. | [optional][default to []] +**token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. | [optional][default to []] +**voice_query_model** | Option<[**models::VoiceQueryModelCollectionConfig**](VoiceQueryModelCollectionConfig.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/CollectionUpdateSchema.md b/typesense_codegen/docs/CollectionUpdateSchema.md index d40d070..8fd57bc 100644 --- a/typesense_codegen/docs/CollectionUpdateSchema.md +++ b/typesense_codegen/docs/CollectionUpdateSchema.md @@ -4,7 +4,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | +**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/CollectionsApi.md b/typesense_codegen/docs/CollectionsApi.md index 14cb336..9dd1408 100644 --- a/typesense_codegen/docs/CollectionsApi.md +++ b/typesense_codegen/docs/CollectionsApi.md @@ -18,7 +18,7 @@ Method | HTTP request | Description ## create_collection -> crate::models::CollectionResponse create_collection(collection_schema) +> models::CollectionResponse create_collection(collection_schema) Create a new collection When a collection is created, we give it a name and describe the fields that will be indexed from the documents added to the collection. @@ -32,7 +32,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::CollectionResponse**](CollectionResponse.md) +[**models::CollectionResponse**](CollectionResponse.md) ### Authorization @@ -48,7 +48,7 @@ Name | Type | Description | Required | Notes ## delete_alias -> crate::models::CollectionAlias delete_alias(alias_name) +> models::CollectionAlias delete_alias(alias_name) Delete an alias ### Parameters @@ -60,7 +60,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::CollectionAlias**](CollectionAlias.md) +[**models::CollectionAlias**](CollectionAlias.md) ### Authorization @@ -76,7 +76,7 @@ Name | Type | Description | Required | Notes ## delete_collection -> crate::models::CollectionResponse delete_collection(collection_name) +> models::CollectionResponse delete_collection(collection_name) Delete a collection Permanently drops a collection. This action cannot be undone. 
For large collections, this might have an impact on read latencies. @@ -90,7 +90,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::CollectionResponse**](CollectionResponse.md) +[**models::CollectionResponse**](CollectionResponse.md) ### Authorization @@ -106,7 +106,7 @@ Name | Type | Description | Required | Notes ## get_alias -> crate::models::CollectionAlias get_alias(alias_name) +> models::CollectionAlias get_alias(alias_name) Retrieve an alias Find out which collection an alias points to by fetching it @@ -120,7 +120,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::CollectionAlias**](CollectionAlias.md) +[**models::CollectionAlias**](CollectionAlias.md) ### Authorization @@ -136,7 +136,7 @@ Name | Type | Description | Required | Notes ## get_aliases -> crate::models::CollectionAliasesResponse get_aliases() +> models::CollectionAliasesResponse get_aliases() List all aliases List all aliases and the corresponding collections that they map to. @@ -147,7 +147,7 @@ This endpoint does not need any parameter. ### Return type -[**crate::models::CollectionAliasesResponse**](CollectionAliasesResponse.md) +[**models::CollectionAliasesResponse**](CollectionAliasesResponse.md) ### Authorization @@ -163,7 +163,7 @@ This endpoint does not need any parameter. ## get_collection -> crate::models::CollectionResponse get_collection(collection_name) +> models::CollectionResponse get_collection(collection_name) Retrieve a single collection Retrieve the details of a collection, given its name. @@ -177,7 +177,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::CollectionResponse**](CollectionResponse.md) +[**models::CollectionResponse**](CollectionResponse.md) ### Authorization @@ -193,7 +193,7 @@ Name | Type | Description | Required | Notes ## get_collections -> Vec get_collections() +> Vec get_collections() List all collections Returns a summary of all your collections. The collections are returned sorted by creation date, with the most recent collections appearing first. @@ -204,7 +204,7 @@ This endpoint does not need any parameter. ### Return type -[**Vec**](CollectionResponse.md) +[**Vec**](CollectionResponse.md) ### Authorization @@ -220,7 +220,7 @@ This endpoint does not need any parameter. ## update_collection -> crate::models::CollectionUpdateSchema update_collection(collection_name, collection_update_schema) +> models::CollectionUpdateSchema update_collection(collection_name, collection_update_schema) Update a collection Update a collection's schema to modify the fields and their types. @@ -235,7 +235,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::CollectionUpdateSchema**](CollectionUpdateSchema.md) +[**models::CollectionUpdateSchema**](CollectionUpdateSchema.md) ### Authorization @@ -251,7 +251,7 @@ Name | Type | Description | Required | Notes ## upsert_alias -> crate::models::CollectionAlias upsert_alias(alias_name, collection_alias_schema) +> models::CollectionAlias upsert_alias(alias_name, collection_alias_schema) Create or update a collection alias Create or update a collection alias. An alias is a virtual collection name that points to a real collection. If you're familiar with symbolic links on Linux, it's very similar to that. Aliases are useful when you want to reindex your data in the background on a new collection and switch your application to it without any changes to your code. 
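To make the collections and alias endpoints above (with their new plain `models::` return types) concrete, here is a rough usage sketch. It is not part of the generated output: the `collections_api` module path matches the files touched by this patch, but the async functions taking `&Configuration` first, the `&str` path parameter, the public struct fields, and the single `collection_name` field assumed on `CollectionAliasSchema` are all assumptions based on the usual openapi-generator reqwest layout; the `books` / `books_alias` names are placeholders.

```rust
use typesense_codegen::apis::{collections_api, configuration::Configuration};
use typesense_codegen::models;

// Sketch only: assumes the stock openapi-generator reqwest signatures,
// i.e. async functions that take `&Configuration` as their first argument.
async fn create_books_and_alias(
    config: &Configuration,
    fields: Vec<models::Field>,
) -> Result<(), Box<dyn std::error::Error>> {
    // Field names and types follow the CollectionSchema.md table in this patch;
    // optional settings are simply left unset.
    let schema = models::CollectionSchema {
        default_sorting_field: None,
        enable_nested_fields: Some(false),
        fields,
        name: "books".to_string(),
        symbols_to_index: None,
        token_separators: None,
        voice_query_model: None,
    };

    let created = collections_api::create_collection(config, schema).await?;
    println!("created '{}' ({} documents)", created.name, created.num_documents);

    // Re-point the alias at the freshly created collection
    // (CollectionAliasSchema is assumed to carry just `collection_name`).
    let alias = collections_api::upsert_alias(
        config,
        "books_alias",
        models::CollectionAliasSchema { collection_name: created.name.clone() },
    )
    .await?;
    println!("alias '{}' -> '{}'", alias.name, alias.collection_name);
    Ok(())
}
```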
@@ -266,7 +266,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::CollectionAlias**](CollectionAlias.md) +[**models::CollectionAlias**](CollectionAlias.md) ### Authorization diff --git a/typesense_codegen/docs/ConversationModelCreateSchema.md b/typesense_codegen/docs/ConversationModelCreateSchema.md new file mode 100644 index 0000000..a070654 --- /dev/null +++ b/typesense_codegen/docs/ConversationModelCreateSchema.md @@ -0,0 +1,19 @@ +# ConversationModelCreateSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**account_id** | Option<**String**> | LLM service's account ID (only applicable for Cloudflare) | [optional] +**api_key** | Option<**String**> | The LLM service's API Key | [optional] +**history_collection** | **String** | Typesense collection that stores the historical conversations | +**id** | Option<**String**> | An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. | [optional] +**max_bytes** | **i32** | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. | +**model_name** | **String** | Name of the LLM model offered by OpenAI, Cloudflare or vLLM | +**system_prompt** | Option<**String**> | The system prompt that contains special instructions to the LLM | [optional] +**ttl** | Option<**i32**> | Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) | [optional] +**vllm_url** | Option<**String**> | URL of vLLM service | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/ConversationModelSchema.md b/typesense_codegen/docs/ConversationModelSchema.md new file mode 100644 index 0000000..74dfa4d --- /dev/null +++ b/typesense_codegen/docs/ConversationModelSchema.md @@ -0,0 +1,19 @@ +# ConversationModelSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**account_id** | Option<**String**> | LLM service's account ID (only applicable for Cloudflare) | [optional] +**api_key** | Option<**String**> | The LLM service's API Key | [optional] +**history_collection** | **String** | Typesense collection that stores the historical conversations | +**id** | **String** | An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. | +**max_bytes** | **i32** | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. | +**model_name** | **String** | Name of the LLM model offered by OpenAI, Cloudflare or vLLM | +**system_prompt** | Option<**String**> | The system prompt that contains special instructions to the LLM | [optional] +**ttl** | Option<**i32**> | Time interval in seconds after which the messages would be deleted. 
Default: 86400 (24 hours) | [optional] +**vllm_url** | Option<**String**> | URL of vLLM service | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/ConversationModelUpdateSchema.md b/typesense_codegen/docs/ConversationModelUpdateSchema.md new file mode 100644 index 0000000..e12c165 --- /dev/null +++ b/typesense_codegen/docs/ConversationModelUpdateSchema.md @@ -0,0 +1,19 @@ +# ConversationModelUpdateSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**account_id** | Option<**String**> | LLM service's account ID (only applicable for Cloudflare) | [optional] +**api_key** | Option<**String**> | The LLM service's API Key | [optional] +**history_collection** | Option<**String**> | Typesense collection that stores the historical conversations | [optional] +**id** | Option<**String**> | An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. | [optional] +**max_bytes** | Option<**i32**> | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. | [optional] +**model_name** | Option<**String**> | Name of the LLM model offered by OpenAI, Cloudflare or vLLM | [optional] +**system_prompt** | Option<**String**> | The system prompt that contains special instructions to the LLM | [optional] +**ttl** | Option<**i32**> | Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) | [optional] +**vllm_url** | Option<**String**> | URL of vLLM service | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/ConversationsApi.md b/typesense_codegen/docs/ConversationsApi.md new file mode 100644 index 0000000..f61018c --- /dev/null +++ b/typesense_codegen/docs/ConversationsApi.md @@ -0,0 +1,161 @@ +# \ConversationsApi + +All URIs are relative to *http://localhost* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**create_conversation_model**](ConversationsApi.md#create_conversation_model) | **POST** /conversations/models | +[**delete_conversation_model**](ConversationsApi.md#delete_conversation_model) | **DELETE** /conversations/models/{modelId} | Delete a conversation model +[**retrieve_all_conversation_models**](ConversationsApi.md#retrieve_all_conversation_models) | **GET** /conversations/models | List all conversation models +[**retrieve_conversation_model**](ConversationsApi.md#retrieve_conversation_model) | **GET** /conversations/models/{modelId} | Retrieve a conversation model +[**update_conversation_model**](ConversationsApi.md#update_conversation_model) | **PUT** /conversations/models/{modelId} | Update a conversation model + + + +## create_conversation_model + +> models::ConversationModelSchema create_conversation_model(conversation_model_create_schema) + + +Create a Conversation Model + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**conversation_model_create_schema** | [**ConversationModelCreateSchema**](ConversationModelCreateSchema.md) | | [required] | + +### Return type + 
+[**models::ConversationModelSchema**](ConversationModelSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## delete_conversation_model + +> models::ConversationModelSchema delete_conversation_model(model_id) +Delete a conversation model + +Delete a conversation model + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**model_id** | **String** | The id of the conversation model to delete | [required] | + +### Return type + +[**models::ConversationModelSchema**](ConversationModelSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_all_conversation_models + +> Vec retrieve_all_conversation_models() +List all conversation models + +Retrieve all conversation models + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +[**Vec**](ConversationModelSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_conversation_model + +> models::ConversationModelSchema retrieve_conversation_model(model_id) +Retrieve a conversation model + +Retrieve a conversation model + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**model_id** | **String** | The id of the conversation model to retrieve | [required] | + +### Return type + +[**models::ConversationModelSchema**](ConversationModelSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## update_conversation_model + +> models::ConversationModelSchema update_conversation_model(model_id, conversation_model_update_schema) +Update a conversation model + +Update a conversation model + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**model_id** | **String** | The id of the conversation model to update | [required] | +**conversation_model_update_schema** | [**ConversationModelUpdateSchema**](ConversationModelUpdateSchema.md) | | [required] | + +### Return type + +[**models::ConversationModelSchema**](ConversationModelSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + 
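Since the ConversationsApi surface is new in this patch, a hedged sketch of registering a model may help. The field names, types, and required/optional split come straight from the ConversationModelCreateSchema.md table above; the async `&Configuration`-first signatures, the public struct fields, and the collection/model names used here are assumptions and placeholders.

```rust
use typesense_codegen::apis::{configuration::Configuration, conversations_api};
use typesense_codegen::models;

// Sketch only: struct fields mirror ConversationModelCreateSchema.md;
// the stock async openapi-generator reqwest signatures are assumed.
async fn register_conversation_model(
    config: &Configuration,
) -> Result<(), Box<dyn std::error::Error>> {
    let schema = models::ConversationModelCreateSchema {
        account_id: None,                                      // only relevant for Cloudflare
        api_key: Some("LLM_PROVIDER_KEY".to_string()),         // placeholder key
        history_collection: "conversation_store".to_string(),  // placeholder collection
        id: None,                                              // let the server assign an id
        max_bytes: 16384,
        model_name: "openai/gpt-4o-mini".to_string(),          // placeholder model name
        system_prompt: Some("Answer only from the indexed documents.".to_string()),
        ttl: None,                                             // defaults to 86400 seconds
        vllm_url: None,
    };

    let model = conversations_api::create_conversation_model(config, schema).await?;
    println!("conversation model id: {}", model.id);

    let all = conversations_api::retrieve_all_conversation_models(config).await?;
    println!("{} conversation model(s) registered", all.len());
    Ok(())
}
```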
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/typesense_codegen/docs/PromoteApi.md b/typesense_codegen/docs/CurationApi.md similarity index 71% rename from typesense_codegen/docs/PromoteApi.md rename to typesense_codegen/docs/CurationApi.md index 948b093..13a43fa 100644 --- a/typesense_codegen/docs/PromoteApi.md +++ b/typesense_codegen/docs/CurationApi.md @@ -1,18 +1,18 @@ -# \PromoteApi +# \CurationApi All URIs are relative to *http://localhost* Method | HTTP request | Description ------------- | ------------- | ------------- -[**delete_search_override**](PromoteApi.md#delete_search_override) | **DELETE** /collections/{collectionName}/overrides/{overrideId} | Delete an override associated with a collection -[**get_search_overrides**](PromoteApi.md#get_search_overrides) | **GET** /collections/{collectionName}/overrides | List all collection overrides -[**upsert_search_override**](PromoteApi.md#upsert_search_override) | **PUT** /collections/{collectionName}/overrides/{overrideId} | Create or update an override to promote certain documents over others +[**delete_search_override**](CurationApi.md#delete_search_override) | **DELETE** /collections/{collectionName}/overrides/{overrideId} | Delete an override associated with a collection +[**get_search_overrides**](CurationApi.md#get_search_overrides) | **GET** /collections/{collectionName}/overrides | List all collection overrides +[**upsert_search_override**](CurationApi.md#upsert_search_override) | **PUT** /collections/{collectionName}/overrides/{overrideId} | Create or update an override to promote certain documents over others ## delete_search_override -> crate::models::SearchOverride delete_search_override(collection_name, override_id) +> models::SearchOverrideDeleteResponse delete_search_override(collection_name, override_id) Delete an override associated with a collection ### Parameters @@ -25,7 +25,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::SearchOverride**](SearchOverride.md) +[**models::SearchOverrideDeleteResponse**](SearchOverrideDeleteResponse.md) ### Authorization @@ -41,7 +41,7 @@ Name | Type | Description | Required | Notes ## get_search_overrides -> crate::models::SearchOverridesResponse get_search_overrides(collection_name) +> models::SearchOverridesResponse get_search_overrides(collection_name) List all collection overrides ### Parameters @@ -53,7 +53,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::SearchOverridesResponse**](SearchOverridesResponse.md) +[**models::SearchOverridesResponse**](SearchOverridesResponse.md) ### Authorization @@ -69,7 +69,7 @@ Name | Type | Description | Required | Notes ## upsert_search_override -> crate::models::SearchOverride upsert_search_override(collection_name, override_id, search_override_schema) +> models::SearchOverride upsert_search_override(collection_name, override_id, search_override_schema) Create or update an override to promote certain documents over others Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. 
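The PromoteApi to CurationApi rename, and the switch of the delete endpoint to a dedicated delete-response model, can be illustrated with a short sketch. The `curation_api` module name matches the new src/apis/curation_api.rs in this patch; the async `&Configuration`-first signatures, `&str` path parameters, and Debug-printable generated models are assumptions.

```rust
use typesense_codegen::apis::{configuration::Configuration, curation_api};

// Sketch only: the former promote_api calls now live in curation_api, and
// delete_search_override returns a slim SearchOverrideDeleteResponse rather
// than echoing back the full override. Stock async signatures are assumed.
async fn drop_override(
    config: &Configuration,
    collection: &str,
    override_id: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    // List the overrides currently defined for this collection.
    let existing = curation_api::get_search_overrides(config, collection).await?;
    println!("current overrides: {existing:?}");

    // Delete one of them; only the small delete response comes back.
    let deleted = curation_api::delete_search_override(config, collection, override_id).await?;
    println!("deleted: {deleted:?}");
    Ok(())
}
```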
@@ -85,7 +85,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::SearchOverride**](SearchOverride.md) +[**models::SearchOverride**](SearchOverride.md) ### Authorization diff --git a/typesense_codegen/docs/DebugApi.md b/typesense_codegen/docs/DebugApi.md index a1f8c0f..9acf06e 100644 --- a/typesense_codegen/docs/DebugApi.md +++ b/typesense_codegen/docs/DebugApi.md @@ -10,7 +10,7 @@ Method | HTTP request | Description ## debug -> crate::models::Debug200Response debug() +> models::Debug200Response debug() Print debugging information Print debugging information @@ -21,7 +21,7 @@ This endpoint does not need any parameter. ### Return type -[**crate::models::Debug200Response**](debug_200_response.md) +[**models::Debug200Response**](debug_200_response.md) ### Authorization diff --git a/typesense_codegen/docs/DeleteDocumentsDeleteDocumentsParametersParameter.md b/typesense_codegen/docs/DeleteDocumentsDeleteDocumentsParametersParameter.md deleted file mode 100644 index 433c2be..0000000 --- a/typesense_codegen/docs/DeleteDocumentsDeleteDocumentsParametersParameter.md +++ /dev/null @@ -1,12 +0,0 @@ -# DeleteDocumentsDeleteDocumentsParametersParameter - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**filter_by** | Option<**String**> | | [optional] -**batch_size** | Option<**i32**> | Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. | [optional] - -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/typesense_codegen/docs/DeleteStopwordsSet200Response.md b/typesense_codegen/docs/DeleteStopwordsSet200Response.md new file mode 100644 index 0000000..919a663 --- /dev/null +++ b/typesense_codegen/docs/DeleteStopwordsSet200Response.md @@ -0,0 +1,11 @@ +# DeleteStopwordsSet200Response + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **String** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/DirtyValues.md b/typesense_codegen/docs/DirtyValues.md new file mode 100644 index 0000000..50492e1 --- /dev/null +++ b/typesense_codegen/docs/DirtyValues.md @@ -0,0 +1,15 @@ +# DirtyValues + +## Enum Variants + +| Name | Value | +|---- | -----| +| CoerceOrReject | coerce_or_reject | +| CoerceOrDrop | coerce_or_drop | +| Drop | drop | +| Reject | reject | + + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/DocumentIndexParameters.md b/typesense_codegen/docs/DocumentIndexParameters.md new file mode 100644 index 0000000..08f8e95 --- /dev/null +++ b/typesense_codegen/docs/DocumentIndexParameters.md @@ -0,0 +1,11 @@ +# DocumentIndexParameters + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**dirty_values** | Option<[**models::DirtyValues**](DirtyValues.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) 
[[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/DocumentsApi.md b/typesense_codegen/docs/DocumentsApi.md index eebec19..a3e79aa 100644 --- a/typesense_codegen/docs/DocumentsApi.md +++ b/typesense_codegen/docs/DocumentsApi.md @@ -7,13 +7,10 @@ Method | HTTP request | Description [**delete_document**](DocumentsApi.md#delete_document) | **DELETE** /collections/{collectionName}/documents/{documentId} | Delete a document [**delete_documents**](DocumentsApi.md#delete_documents) | **DELETE** /collections/{collectionName}/documents | Delete a bunch of documents [**delete_search_override**](DocumentsApi.md#delete_search_override) | **DELETE** /collections/{collectionName}/overrides/{overrideId} | Delete an override associated with a collection -[**delete_search_synonym**](DocumentsApi.md#delete_search_synonym) | **DELETE** /collections/{collectionName}/synonyms/{synonymId} | Delete a synonym associated with a collection [**export_documents**](DocumentsApi.md#export_documents) | **GET** /collections/{collectionName}/documents/export | Export all documents in a collection [**get_document**](DocumentsApi.md#get_document) | **GET** /collections/{collectionName}/documents/{documentId} | Retreive a document [**get_search_override**](DocumentsApi.md#get_search_override) | **GET** /collections/{collectionName}/overrides/{overrideId} | Retrieve a single search override [**get_search_overrides**](DocumentsApi.md#get_search_overrides) | **GET** /collections/{collectionName}/overrides | List all collection overrides -[**get_search_synonym**](DocumentsApi.md#get_search_synonym) | **GET** /collections/{collectionName}/synonyms/{synonymId} | Retrieve a single search synonym -[**get_search_synonyms**](DocumentsApi.md#get_search_synonyms) | **GET** /collections/{collectionName}/synonyms | List all collection synonyms [**import_documents**](DocumentsApi.md#import_documents) | **POST** /collections/{collectionName}/documents/import | Import documents into a collection [**index_document**](DocumentsApi.md#index_document) | **POST** /collections/{collectionName}/documents | Index a document [**multi_search**](DocumentsApi.md#multi_search) | **POST** /multi_search | send multiple search requests in a single HTTP request @@ -21,7 +18,6 @@ Method | HTTP request | Description [**update_document**](DocumentsApi.md#update_document) | **PATCH** /collections/{collectionName}/documents/{documentId} | Update a document [**update_documents**](DocumentsApi.md#update_documents) | **PATCH** /collections/{collectionName}/documents | Update documents with conditional query [**upsert_search_override**](DocumentsApi.md#upsert_search_override) | **PUT** /collections/{collectionName}/overrides/{overrideId} | Create or update an override to promote certain documents over others -[**upsert_search_synonym**](DocumentsApi.md#upsert_search_synonym) | **PUT** /collections/{collectionName}/synonyms/{synonymId} | Create or update a synonym @@ -58,7 +54,7 @@ Name | Type | Description | Required | Notes ## delete_documents -> crate::models::DeleteDocuments200Response delete_documents(collection_name, delete_documents_parameters) +> models::DeleteDocuments200Response delete_documents(collection_name, batch_size, filter_by, ignore_not_found, truncate) Delete a bunch of documents Delete a bunch of documents that match a specific filter condition. Use the `batch_size` parameter to control the number of documents that should deleted at a time. 
A larger value will speed up deletions, but will impact performance of other operations running on the server. @@ -69,11 +65,14 @@ Delete a bunch of documents that match a specific filter condition. Use the `bat Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection to delete documents from | [required] | -**delete_documents_parameters** | Option<[**DeleteDocumentsDeleteDocumentsParametersParameter**](.md)> | | | +**batch_size** | Option<**i32**> | | | +**filter_by** | Option<**String**> | | | +**ignore_not_found** | Option<**bool**> | | | +**truncate** | Option<**bool**> | | | ### Return type -[**crate::models::DeleteDocuments200Response**](deleteDocuments_200_response.md) +[**models::DeleteDocuments200Response**](deleteDocuments_200_response.md) ### Authorization @@ -89,7 +88,7 @@ Name | Type | Description | Required | Notes ## delete_search_override -> crate::models::SearchOverride delete_search_override(collection_name, override_id) +> models::SearchOverrideDeleteResponse delete_search_override(collection_name, override_id) Delete an override associated with a collection ### Parameters @@ -102,36 +101,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::SearchOverride**](SearchOverride.md) - -### Authorization - -[api_key_header](../README.md#api_key_header) - -### HTTP request headers - -- **Content-Type**: Not defined -- **Accept**: application/json - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - - -## delete_search_synonym - -> crate::models::SearchSynonym delete_search_synonym(collection_name, synonym_id) -Delete a synonym associated with a collection - -### Parameters - - -Name | Type | Description | Required | Notes -------------- | ------------- | ------------- | ------------- | ------------- -**collection_name** | **String** | The name of the collection | [required] | -**synonym_id** | **String** | The ID of the search synonym to delete | [required] | - -### Return type - -[**crate::models::SearchSynonym**](SearchSynonym.md) +[**models::SearchOverrideDeleteResponse**](SearchOverrideDeleteResponse.md) ### Authorization @@ -147,7 +117,7 @@ Name | Type | Description | Required | Notes ## export_documents -> String export_documents(collection_name, export_documents_parameters) +> String export_documents(collection_name, exclude_fields, filter_by, include_fields) Export all documents in a collection Export all documents in a collection in JSON lines format. @@ -158,7 +128,9 @@ Export all documents in a collection in JSON lines format. 
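With the wrapper parameter objects (DeleteDocumentsDeleteDocumentsParametersParameter, ExportDocumentsExportDocumentsParametersParameter) removed, each former field becomes an individual argument, in the order shown in the parameter tables around this hunk (the export_documents table continues just below). A rough sketch, assuming the stock async `&Configuration`-first signatures, `Option<&str>` for the optional string parameters, and placeholder field/filter names:

```rust
use typesense_codegen::apis::{configuration::Configuration, documents_api};

// Sketch only: the flattened per-field arguments replace the old
// *Parameters wrapper objects. Stock async signatures are assumed.
async fn purge_and_export(
    config: &Configuration,
    collection: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    // delete_documents(collection_name, batch_size, filter_by, ignore_not_found, truncate)
    let deleted = documents_api::delete_documents(
        config,
        collection,
        Some(200),             // batch_size
        Some("num_copies:<1"), // filter_by (placeholder filter)
        Some(true),            // ignore_not_found
        None,                  // truncate
    )
    .await?;
    println!("delete result: {deleted:?}");

    // export_documents(collection_name, exclude_fields, filter_by, include_fields)
    let jsonl =
        documents_api::export_documents(config, collection, None, None, Some("id,title")).await?;
    println!("exported {} bytes of JSONL", jsonl.len());
    Ok(())
}
```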
Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection | [required] | -**export_documents_parameters** | Option<[**ExportDocumentsExportDocumentsParametersParameter**](.md)> | | | +**exclude_fields** | Option<**String**> | | | +**filter_by** | Option<**String**> | | | +**include_fields** | Option<**String**> | | | ### Return type @@ -209,7 +181,7 @@ Name | Type | Description | Required | Notes ## get_search_override -> crate::models::SearchOverride get_search_override(collection_name, override_id) +> models::SearchOverride get_search_override(collection_name, override_id) Retrieve a single search override Retrieve the details of a search override, given its id. @@ -224,7 +196,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::SearchOverride**](SearchOverride.md) +[**models::SearchOverride**](SearchOverride.md) ### Authorization @@ -240,7 +212,7 @@ Name | Type | Description | Required | Notes ## get_search_overrides -> crate::models::SearchOverridesResponse get_search_overrides(collection_name) +> models::SearchOverridesResponse get_search_overrides(collection_name) List all collection overrides ### Parameters @@ -252,66 +224,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::SearchOverridesResponse**](SearchOverridesResponse.md) - -### Authorization - -[api_key_header](../README.md#api_key_header) - -### HTTP request headers - -- **Content-Type**: Not defined -- **Accept**: application/json - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - - -## get_search_synonym - -> crate::models::SearchSynonym get_search_synonym(collection_name, synonym_id) -Retrieve a single search synonym - -Retrieve the details of a search synonym, given its id. 
- -### Parameters - - -Name | Type | Description | Required | Notes -------------- | ------------- | ------------- | ------------- | ------------- -**collection_name** | **String** | The name of the collection | [required] | -**synonym_id** | **String** | The id of the search synonym | [required] | - -### Return type - -[**crate::models::SearchSynonym**](SearchSynonym.md) - -### Authorization - -[api_key_header](../README.md#api_key_header) - -### HTTP request headers - -- **Content-Type**: Not defined -- **Accept**: application/json - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - - -## get_search_synonyms - -> crate::models::SearchSynonymsResponse get_search_synonyms(collection_name) -List all collection synonyms - -### Parameters - - -Name | Type | Description | Required | Notes -------------- | ------------- | ------------- | ------------- | ------------- -**collection_name** | **String** | The name of the collection | [required] | - -### Return type - -[**crate::models::SearchSynonymsResponse**](SearchSynonymsResponse.md) +[**models::SearchOverridesResponse**](SearchOverridesResponse.md) ### Authorization @@ -327,7 +240,7 @@ Name | Type | Description | Required | Notes ## import_documents -> String import_documents(collection_name, body, import_documents_parameters) +> String import_documents(collection_name, body, action, batch_size, dirty_values, remote_embedding_batch_size, return_doc, return_id) Import documents into a collection The documents to be imported must be formatted in a newline delimited JSON structure. You can feed the output file from a Typesense export operation directly as import. @@ -339,7 +252,12 @@ Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection | [required] | **body** | **String** | The json array of documents or the JSONL file to import | [required] | -**import_documents_parameters** | Option<[**ImportDocumentsImportDocumentsParametersParameter**](.md)> | | | +**action** | Option<[**IndexAction**](.md)> | | | +**batch_size** | Option<**i32**> | | | +**dirty_values** | Option<[**DirtyValues**](.md)> | | | +**remote_embedding_batch_size** | Option<**i32**> | | | +**return_doc** | Option<**bool**> | | | +**return_id** | Option<**bool**> | | | ### Return type @@ -359,7 +277,7 @@ Name | Type | Description | Required | Notes ## index_document -> serde_json::Value index_document(collection_name, body, action) +> serde_json::Value index_document(collection_name, body, action, dirty_values) Index a document A document to be indexed in a given collection must conform to the schema of the collection. 
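On the indexing side, the dedicated `DirtyValues` enum (see DirtyValues.md above) and the `IndexAction` type are now passed straight to `index_document`, whose parameters are listed just below. A sketch, assuming the stock async `&Configuration`-first signatures; the document fields are placeholders:

```rust
use serde_json::json;
use typesense_codegen::apis::{configuration::Configuration, documents_api};
use typesense_codegen::models::DirtyValues;

// Sketch only: index_document(collection_name, body, action, dirty_values),
// with the body passed as a plain serde_json::Value as documented above.
async fn index_one(
    config: &Configuration,
    collection: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let doc = json!({ "id": "1", "title": "The Hitchhiker's Guide", "num_copies": 42 });

    let stored = documents_api::index_document(
        config,
        collection,
        doc,
        None,                              // action: None keeps the server default; an IndexAction value could go here
        Some(DirtyValues::CoerceOrReject), // coerce mismatched field types, reject if coercion fails
    )
    .await?;
    println!("indexed: {stored}");
    Ok(())
}
```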
@@ -371,7 +289,8 @@ Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection to add the document to | [required] | **body** | **serde_json::Value** | The document object to be indexed | [required] | -**action** | Option<**String**> | Additional action to perform | | +**action** | Option<**IndexAction**> | Additional action to perform | | +**dirty_values** | Option<[**DirtyValues**](.md)> | Dealing with Dirty Data | | ### Return type @@ -391,7 +310,7 @@ Name | Type | Description | Required | Notes ## multi_search -> crate::models::MultiSearchResult multi_search(multi_search_parameters, multi_search_searches_parameter) +> models::MultiSearchResult multi_search(cache_ttl, conversation, conversation_id, conversation_model_id, drop_tokens_mode, drop_tokens_threshold, enable_highlight_v1, enable_overrides, enable_synonyms, enable_typos_for_alpha_numerical_tokens, enable_typos_for_numerical_tokens, exclude_fields, exhaustive_search, facet_by, facet_query, facet_return_parent, facet_strategy, filter_by, filter_curated_hits, group_by, group_limit, group_missing_values, hidden_hits, highlight_affix_num_tokens, highlight_end_tag, highlight_fields, highlight_full_fields, highlight_start_tag, include_fields, infix, limit, max_candidates, max_extra_prefix, max_extra_suffix, max_facet_values, max_filter_by_candidates, min_len_1typo, min_len_2typo, num_typos, offset, override_tags, page, per_page, pinned_hits, pre_segmented_query, prefix, preset, prioritize_exact_match, prioritize_num_matching_fields, prioritize_token_position, q, query_by, query_by_weights, remote_embedding_num_tries, remote_embedding_timeout_ms, search_cutoff_ms, snippet_threshold, sort_by, split_join_tokens, stopwords, synonym_num_typos, synonym_prefix, text_match_type, typo_tokens_threshold, use_cache, vector_query, voice_query, multi_search_searches_parameter) send multiple search requests in a single HTTP request This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. 
@@ -401,12 +320,78 @@ This is especially useful to avoid round-trip network latencies incurred otherwi Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**multi_search_parameters** | [**MultiSearchParameters**](.md) | | [required] | +**cache_ttl** | Option<**i32**> | | | +**conversation** | Option<**bool**> | | | +**conversation_id** | Option<**String**> | | | +**conversation_model_id** | Option<**String**> | | | +**drop_tokens_mode** | Option<[**DropTokensMode**](.md)> | | | +**drop_tokens_threshold** | Option<**i32**> | | | +**enable_highlight_v1** | Option<**bool**> | | | +**enable_overrides** | Option<**bool**> | | | +**enable_synonyms** | Option<**bool**> | | | +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | | | +**enable_typos_for_numerical_tokens** | Option<**bool**> | | | +**exclude_fields** | Option<**String**> | | | +**exhaustive_search** | Option<**bool**> | | | +**facet_by** | Option<**String**> | | | +**facet_query** | Option<**String**> | | | +**facet_return_parent** | Option<**String**> | | | +**facet_strategy** | Option<**String**> | | | +**filter_by** | Option<**String**> | | | +**filter_curated_hits** | Option<**bool**> | | | +**group_by** | Option<**String**> | | | +**group_limit** | Option<**i32**> | | | +**group_missing_values** | Option<**bool**> | | | +**hidden_hits** | Option<**String**> | | | +**highlight_affix_num_tokens** | Option<**i32**> | | | +**highlight_end_tag** | Option<**String**> | | | +**highlight_fields** | Option<**String**> | | | +**highlight_full_fields** | Option<**String**> | | | +**highlight_start_tag** | Option<**String**> | | | +**include_fields** | Option<**String**> | | | +**infix** | Option<**String**> | | | +**limit** | Option<**i32**> | | | +**max_candidates** | Option<**i32**> | | | +**max_extra_prefix** | Option<**i32**> | | | +**max_extra_suffix** | Option<**i32**> | | | +**max_facet_values** | Option<**i32**> | | | +**max_filter_by_candidates** | Option<**i32**> | | | +**min_len_1typo** | Option<**i32**> | | | +**min_len_2typo** | Option<**i32**> | | | +**num_typos** | Option<**String**> | | | +**offset** | Option<**i32**> | | | +**override_tags** | Option<**String**> | | | +**page** | Option<**i32**> | | | +**per_page** | Option<**i32**> | | | +**pinned_hits** | Option<**String**> | | | +**pre_segmented_query** | Option<**bool**> | | | +**prefix** | Option<**String**> | | | +**preset** | Option<**String**> | | | +**prioritize_exact_match** | Option<**bool**> | | | +**prioritize_num_matching_fields** | Option<**bool**> | | | +**prioritize_token_position** | Option<**bool**> | | | +**q** | Option<**String**> | | | +**query_by** | Option<**String**> | | | +**query_by_weights** | Option<**String**> | | | +**remote_embedding_num_tries** | Option<**i32**> | | | +**remote_embedding_timeout_ms** | Option<**i32**> | | | +**search_cutoff_ms** | Option<**i32**> | | | +**snippet_threshold** | Option<**i32**> | | | +**sort_by** | Option<**String**> | | | +**split_join_tokens** | Option<**String**> | | | +**stopwords** | Option<**String**> | | | +**synonym_num_typos** | Option<**i32**> | | | +**synonym_prefix** | Option<**bool**> | | | +**text_match_type** | Option<**String**> | | | +**typo_tokens_threshold** | Option<**i32**> | | | +**use_cache** | Option<**bool**> | | | +**vector_query** | Option<**String**> | | | +**voice_query** | Option<**String**> | | | **multi_search_searches_parameter** | 
Option<[**MultiSearchSearchesParameter**](MultiSearchSearchesParameter.md)> | | | ### Return type -[**crate::models::MultiSearchResult**](MultiSearchResult.md) +[**models::MultiSearchResult**](MultiSearchResult.md) ### Authorization @@ -422,7 +407,7 @@ Name | Type | Description | Required | Notes ## search_collection -> crate::models::SearchResult search_collection(collection_name, search_parameters) +> models::SearchResult search_collection(collection_name, cache_ttl, conversation, conversation_id, conversation_model_id, drop_tokens_mode, drop_tokens_threshold, enable_highlight_v1, enable_overrides, enable_synonyms, enable_typos_for_alpha_numerical_tokens, enable_typos_for_numerical_tokens, exclude_fields, exhaustive_search, facet_by, facet_query, facet_return_parent, facet_strategy, filter_by, filter_curated_hits, group_by, group_limit, group_missing_values, hidden_hits, highlight_affix_num_tokens, highlight_end_tag, highlight_fields, highlight_full_fields, highlight_start_tag, include_fields, infix, limit, max_candidates, max_extra_prefix, max_extra_suffix, max_facet_values, max_filter_by_candidates, min_len_1typo, min_len_2typo, num_typos, offset, override_tags, page, per_page, pinned_hits, pre_segmented_query, prefix, preset, prioritize_exact_match, prioritize_num_matching_fields, prioritize_token_position, q, query_by, query_by_weights, remote_embedding_num_tries, remote_embedding_timeout_ms, search_cutoff_ms, snippet_threshold, sort_by, split_join_tokens, stopwords, synonym_num_typos, synonym_prefix, text_match_type, typo_tokens_threshold, use_cache, vector_query, voice_query) Search for documents in a collection Search for documents in a collection that match the search criteria. @@ -433,11 +418,77 @@ Search for documents in a collection that match the search criteria. 
Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection to search for the document under | [required] | -**search_parameters** | [**SearchParameters**](.md) | | [required] | +**cache_ttl** | Option<**i32**> | | | +**conversation** | Option<**bool**> | | | +**conversation_id** | Option<**String**> | | | +**conversation_model_id** | Option<**String**> | | | +**drop_tokens_mode** | Option<[**DropTokensMode**](.md)> | | | +**drop_tokens_threshold** | Option<**i32**> | | | +**enable_highlight_v1** | Option<**bool**> | | | +**enable_overrides** | Option<**bool**> | | | +**enable_synonyms** | Option<**bool**> | | | +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | | | +**enable_typos_for_numerical_tokens** | Option<**bool**> | | | +**exclude_fields** | Option<**String**> | | | +**exhaustive_search** | Option<**bool**> | | | +**facet_by** | Option<**String**> | | | +**facet_query** | Option<**String**> | | | +**facet_return_parent** | Option<**String**> | | | +**facet_strategy** | Option<**String**> | | | +**filter_by** | Option<**String**> | | | +**filter_curated_hits** | Option<**bool**> | | | +**group_by** | Option<**String**> | | | +**group_limit** | Option<**i32**> | | | +**group_missing_values** | Option<**bool**> | | | +**hidden_hits** | Option<**String**> | | | +**highlight_affix_num_tokens** | Option<**i32**> | | | +**highlight_end_tag** | Option<**String**> | | | +**highlight_fields** | Option<**String**> | | | +**highlight_full_fields** | Option<**String**> | | | +**highlight_start_tag** | Option<**String**> | | | +**include_fields** | Option<**String**> | | | +**infix** | Option<**String**> | | | +**limit** | Option<**i32**> | | | +**max_candidates** | Option<**i32**> | | | +**max_extra_prefix** | Option<**i32**> | | | +**max_extra_suffix** | Option<**i32**> | | | +**max_facet_values** | Option<**i32**> | | | +**max_filter_by_candidates** | Option<**i32**> | | | +**min_len_1typo** | Option<**i32**> | | | +**min_len_2typo** | Option<**i32**> | | | +**num_typos** | Option<**String**> | | | +**offset** | Option<**i32**> | | | +**override_tags** | Option<**String**> | | | +**page** | Option<**i32**> | | | +**per_page** | Option<**i32**> | | | +**pinned_hits** | Option<**String**> | | | +**pre_segmented_query** | Option<**bool**> | | | +**prefix** | Option<**String**> | | | +**preset** | Option<**String**> | | | +**prioritize_exact_match** | Option<**bool**> | | | +**prioritize_num_matching_fields** | Option<**bool**> | | | +**prioritize_token_position** | Option<**bool**> | | | +**q** | Option<**String**> | | | +**query_by** | Option<**String**> | | | +**query_by_weights** | Option<**String**> | | | +**remote_embedding_num_tries** | Option<**i32**> | | | +**remote_embedding_timeout_ms** | Option<**i32**> | | | +**search_cutoff_ms** | Option<**i32**> | | | +**snippet_threshold** | Option<**i32**> | | | +**sort_by** | Option<**String**> | | | +**split_join_tokens** | Option<**String**> | | | +**stopwords** | Option<**String**> | | | +**synonym_num_typos** | Option<**i32**> | | | +**synonym_prefix** | Option<**bool**> | | | +**text_match_type** | Option<**String**> | | | +**typo_tokens_threshold** | Option<**i32**> | | | +**use_cache** | Option<**bool**> | | | +**vector_query** | Option<**String**> | | | +**voice_query** | Option<**String**> | | | ### Return type -[**crate::models::SearchResult**](SearchResult.md) 
+[**models::SearchResult**](SearchResult.md) ### Authorization @@ -453,7 +504,7 @@ Name | Type | Description | Required | Notes ## update_document -> serde_json::Value update_document(collection_name, document_id, body) +> serde_json::Value update_document(collection_name, document_id, body, dirty_values) Update a document Update an individual document from a collection by using its ID. The update can be partial. @@ -466,6 +517,7 @@ Name | Type | Description | Required | Notes **collection_name** | **String** | The name of the collection to search for the document under | [required] | **document_id** | **String** | The Document ID | [required] | **body** | **serde_json::Value** | The document object with fields to be updated | [required] | +**dirty_values** | Option<[**DirtyValues**](.md)> | Dealing with Dirty Data | | ### Return type @@ -485,7 +537,7 @@ Name | Type | Description | Required | Notes ## update_documents -> crate::models::UpdateDocuments200Response update_documents(collection_name, body, update_documents_parameters) +> models::UpdateDocuments200Response update_documents(collection_name, body, filter_by) Update documents with conditional query The filter_by query parameter is used to filter to specify a condition against which the documents are matched. The request body contains the fields that should be updated for any documents that match the filter condition. This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later. @@ -497,11 +549,11 @@ Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection to update documents in | [required] | **body** | **serde_json::Value** | The document fields to be updated | [required] | -**update_documents_parameters** | Option<[**UpdateDocumentsUpdateDocumentsParametersParameter**](.md)> | | | +**filter_by** | Option<**String**> | | | ### Return type -[**crate::models::UpdateDocuments200Response**](updateDocuments_200_response.md) +[**models::UpdateDocuments200Response**](updateDocuments_200_response.md) ### Authorization @@ -517,7 +569,7 @@ Name | Type | Description | Required | Notes ## upsert_search_override -> crate::models::SearchOverride upsert_search_override(collection_name, override_id, search_override_schema) +> models::SearchOverride upsert_search_override(collection_name, override_id, search_override_schema) Create or update an override to promote certain documents over others Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. @@ -533,39 +585,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::SearchOverride**](SearchOverride.md) - -### Authorization - -[api_key_header](../README.md#api_key_header) - -### HTTP request headers - -- **Content-Type**: application/json -- **Accept**: application/json - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - - -## upsert_search_synonym - -> crate::models::SearchSynonym upsert_search_synonym(collection_name, synonym_id, search_synonym_schema) -Create or update a synonym - -Create or update a synonym to define search terms that should be considered equivalent. 
- -### Parameters - - -Name | Type | Description | Required | Notes -------------- | ------------- | ------------- | ------------- | ------------- -**collection_name** | **String** | The name of the collection | [required] | -**synonym_id** | **String** | The ID of the search synonym to create/update | [required] | -**search_synonym_schema** | [**SearchSynonymSchema**](SearchSynonymSchema.md) | The search synonym object to be created/updated | [required] | - -### Return type - -[**crate::models::SearchSynonym**](SearchSynonym.md) +[**models::SearchOverride**](SearchOverride.md) ### Authorization diff --git a/typesense_codegen/docs/DropTokensMode.md b/typesense_codegen/docs/DropTokensMode.md new file mode 100644 index 0000000..f6da263 --- /dev/null +++ b/typesense_codegen/docs/DropTokensMode.md @@ -0,0 +1,14 @@ +# DropTokensMode + +## Enum Variants + +| Name | Value | +|---- | -----| +| RightToLeft | right_to_left | +| LeftToRight | left_to_right | +| BothSidesColon3 | both_sides:3 | + + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/ExportDocumentsExportDocumentsParametersParameter.md b/typesense_codegen/docs/ExportDocumentsExportDocumentsParametersParameter.md deleted file mode 100644 index 14b7aaa..0000000 --- a/typesense_codegen/docs/ExportDocumentsExportDocumentsParametersParameter.md +++ /dev/null @@ -1,13 +0,0 @@ -# ExportDocumentsExportDocumentsParametersParameter - -## Properties - -Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- -**filter_by** | Option<**String**> | Filter conditions for refining your search results. Separate multiple conditions with &&. 
| [optional] -**include_fields** | **String** | List of fields from the document to include in the search result | -**exclude_fields** | **String** | List of fields from the document to exclude in the search result | - -[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) - - diff --git a/typesense_codegen/docs/FacetCounts.md b/typesense_codegen/docs/FacetCounts.md index e850701..4b170c1 100644 --- a/typesense_codegen/docs/FacetCounts.md +++ b/typesense_codegen/docs/FacetCounts.md @@ -4,9 +4,9 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**counts** | Option<[**Vec**](FacetCounts_counts_inner.md)> | | [optional] +**counts** | Option<[**Vec**](FacetCounts_counts_inner.md)> | | [optional] **field_name** | Option<**String**> | | [optional] -**stats** | Option<[**crate::models::FacetCountsStats**](FacetCounts_stats.md)> | | [optional] +**stats** | Option<[**models::FacetCountsStats**](FacetCounts_stats.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/FacetCountsCountsInner.md b/typesense_codegen/docs/FacetCountsCountsInner.md index 23befac..ebdef35 100644 --- a/typesense_codegen/docs/FacetCountsCountsInner.md +++ b/typesense_codegen/docs/FacetCountsCountsInner.md @@ -6,6 +6,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **count** | Option<**i32**> | | [optional] **highlighted** | Option<**String**> | | [optional] +**parent** | Option<[**serde_json::Value**](.md)> | | [optional] **value** | Option<**String**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/FacetCountsStats.md b/typesense_codegen/docs/FacetCountsStats.md index 8917968..3dd5e79 100644 --- a/typesense_codegen/docs/FacetCountsStats.md +++ b/typesense_codegen/docs/FacetCountsStats.md @@ -4,11 +4,11 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**avg** | Option<**f64**> | | [optional] **max** | Option<**f64**> | | [optional] **min** | Option<**f64**> | | [optional] **sum** | Option<**f64**> | | [optional] **total_values** | Option<**i32**> | | [optional] -**avg** | Option<**f64**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/Field.md b/typesense_codegen/docs/Field.md index dbf6a9e..fb5a6db 100644 --- a/typesense_codegen/docs/Field.md +++ b/typesense_codegen/docs/Field.md @@ -4,17 +4,25 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**name** | **String** | | -**r#type** | **String** | | -**optional** | Option<**bool**> | | [optional] +**drop** | Option<**bool**> | | [optional] +**embed** | Option<[**models::FieldEmbed**](Field_embed.md)> | | [optional] **facet** | Option<**bool**> | | [optional] **index** | Option<**bool**> | | [optional][default to true] -**locale** | Option<**String**> | | [optional] -**sort** | Option<**bool**> | | [optional] **infix** | Option<**bool**> | | [optional][default to false] +**locale** | 
Option<**String**> | | [optional] +**name** | **String** | | **num_dim** | Option<**i32**> | | [optional] -**drop** | Option<**bool**> | | [optional] -**embed** | Option<[**crate::models::FieldEmbed**](Field_embed.md)> | | [optional] +**optional** | Option<**bool**> | | [optional] +**range_index** | Option<**bool**> | Enables an index optimized for range filtering on numerical fields (e.g. rating:>3.5). Default: false. | [optional] +**reference** | Option<**String**> | Name of a field in another collection that should be linked to this collection so that it can be joined during query. | [optional] +**sort** | Option<**bool**> | | [optional] +**stem** | Option<**bool**> | Values are stemmed before indexing in-memory. Default: false. | [optional] +**stem_dictionary** | Option<**String**> | Name of the stemming dictionary to use for this field | [optional] +**store** | Option<**bool**> | When set to false, the field value will not be stored on disk. Default: true. | [optional] +**symbols_to_index** | Option<**Vec**> | List of symbols or special characters to be indexed. | [optional][default to []] +**token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. | [optional][default to []] +**r#type** | **String** | | +**vec_dist** | Option<**String**> | The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/FieldEmbed.md b/typesense_codegen/docs/FieldEmbed.md index b1dc03b..dca3983 100644 --- a/typesense_codegen/docs/FieldEmbed.md +++ b/typesense_codegen/docs/FieldEmbed.md @@ -5,7 +5,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **from** | **Vec** | | -**model_config** | [**crate::models::FieldEmbedModelConfig**](Field_embed_model_config.md) | | +**model_config** | [**models::FieldEmbedModelConfig**](Field_embed_model_config.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/FieldEmbedModelConfig.md b/typesense_codegen/docs/FieldEmbedModelConfig.md index 88c03af..e19321e 100644 --- a/typesense_codegen/docs/FieldEmbedModelConfig.md +++ b/typesense_codegen/docs/FieldEmbedModelConfig.md @@ -4,12 +4,16 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**model_name** | **String** | | -**api_key** | Option<**String**> | | [optional] **access_token** | Option<**String**> | | [optional] +**api_key** | Option<**String**> | | [optional] **client_id** | Option<**String**> | | [optional] **client_secret** | Option<**String**> | | [optional] +**indexing_prefix** | Option<**String**> | | [optional] +**model_name** | **String** | | **project_id** | Option<**String**> | | [optional] +**query_prefix** | Option<**String**> | | [optional] +**refresh_token** | Option<**String**> | | [optional] +**url** | Option<**String**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/HealthApi.md b/typesense_codegen/docs/HealthApi.md 
index 8c4d523..d7ed07c 100644 --- a/typesense_codegen/docs/HealthApi.md +++ b/typesense_codegen/docs/HealthApi.md @@ -10,7 +10,7 @@ Method | HTTP request | Description ## health -> crate::models::HealthStatus health() +> models::HealthStatus health() Checks if Typesense server is ready to accept requests. Checks if Typesense server is ready to accept requests. @@ -21,7 +21,7 @@ This endpoint does not need any parameter. ### Return type -[**crate::models::HealthStatus**](HealthStatus.md) +[**models::HealthStatus**](HealthStatus.md) ### Authorization diff --git a/typesense_codegen/docs/IndexAction.md b/typesense_codegen/docs/IndexAction.md new file mode 100644 index 0000000..7c450eb --- /dev/null +++ b/typesense_codegen/docs/IndexAction.md @@ -0,0 +1,15 @@ +# IndexAction + +## Enum Variants + +| Name | Value | +|---- | -----| +| Create | create | +| Update | update | +| Upsert | upsert | +| Emplace | emplace | + + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/KeysApi.md b/typesense_codegen/docs/KeysApi.md index 422c127..e7f4bc1 100644 --- a/typesense_codegen/docs/KeysApi.md +++ b/typesense_codegen/docs/KeysApi.md @@ -13,7 +13,7 @@ Method | HTTP request | Description ## create_key -> crate::models::ApiKey create_key(api_key_schema) +> models::ApiKey create_key(api_key_schema) Create an API Key Create an API Key with fine-grain access control. You can restrict access on both a per-collection and per-action level. The generated key is returned only during creation. You want to store this key carefully in a secure place. @@ -27,7 +27,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::ApiKey**](ApiKey.md) +[**models::ApiKey**](ApiKey.md) ### Authorization @@ -43,7 +43,7 @@ Name | Type | Description | Required | Notes ## delete_key -> crate::models::ApiKey delete_key(key_id) +> models::ApiKeyDeleteResponse delete_key(key_id) Delete an API key given its ID. ### Parameters @@ -55,7 +55,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::ApiKey**](ApiKey.md) +[**models::ApiKeyDeleteResponse**](ApiKeyDeleteResponse.md) ### Authorization @@ -71,7 +71,7 @@ Name | Type | Description | Required | Notes ## get_key -> crate::models::ApiKey get_key(key_id) +> models::ApiKey get_key(key_id) Retrieve (metadata about) a key Retrieve (metadata about) a key. Only the key prefix is returned when you retrieve a key. Due to security reasons, only the create endpoint returns the full API key. @@ -85,7 +85,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::ApiKey**](ApiKey.md) +[**models::ApiKey**](ApiKey.md) ### Authorization @@ -101,7 +101,7 @@ Name | Type | Description | Required | Notes ## get_keys -> crate::models::ApiKeysResponse get_keys() +> models::ApiKeysResponse get_keys() Retrieve (metadata about) all keys. ### Parameters @@ -110,7 +110,7 @@ This endpoint does not need any parameter. 
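The key endpoints above share the same calling convention; a minimal sketch, again assuming a leading `&Configuration` argument and Debug-printable generated models. The key id `1` is illustrative. Note that `delete_key` now resolves to `models::ApiKeyDeleteResponse` (the deleted key's id) rather than the full `models::ApiKey`.

```rust
use typesense_codegen::apis::{configuration::Configuration, keys_api};

// Sketch only: field names and Debug formatting on the response models are assumptions;
// delete_key now yields models::ApiKeyDeleteResponse instead of a full ApiKey.
async fn audit_keys(config: &Configuration) {
    match keys_api::get_keys(config).await {
        Ok(all) => println!("{all:?}"),                 // models::ApiKeysResponse
        Err(e) => eprintln!("listing keys failed: {e}"),
    }
    if let Ok(deleted) = keys_api::delete_key(config, 1).await {
        println!("{deleted:?}");                        // models::ApiKeyDeleteResponse
    }
}
```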
### Return type -[**crate::models::ApiKeysResponse**](ApiKeysResponse.md) +[**models::ApiKeysResponse**](ApiKeysResponse.md) ### Authorization diff --git a/typesense_codegen/docs/ListStemmingDictionaries200Response.md b/typesense_codegen/docs/ListStemmingDictionaries200Response.md new file mode 100644 index 0000000..d56a82d --- /dev/null +++ b/typesense_codegen/docs/ListStemmingDictionaries200Response.md @@ -0,0 +1,11 @@ +# ListStemmingDictionaries200Response + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**dictionaries** | Option<**Vec**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/MultiSearchCollectionParameters.md b/typesense_codegen/docs/MultiSearchCollectionParameters.md index 07be3ee..2f65746 100644 --- a/typesense_codegen/docs/MultiSearchCollectionParameters.md +++ b/typesense_codegen/docs/MultiSearchCollectionParameters.md @@ -4,52 +4,72 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] -**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] -**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] -**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] -**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. | [optional] +**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] +**conversation** | Option<**bool**> | Enable conversational search. | [optional] +**conversation_id** | Option<**String**> | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. | [optional] +**conversation_model_id** | Option<**String**> | The Id of Conversation Model to be used. | [optional] +**drop_tokens_mode** | Option<[**models::DropTokensMode**](DropTokensMode.md)> | | [optional] +**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. 
Default: 10 | [optional] +**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional][default to false] +**enable_synonyms** | Option<**bool**> | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true | [optional] +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. | [optional] +**enable_typos_for_numerical_tokens** | Option<**bool**> | Make Typesense disable typos for numerical tokens. | [optional][default to true] +**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] +**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] +**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] +**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] +**facet_return_parent** | Option<**String**> | Comma separated string of nested facet fields whose parent object should be returned in facet response. | [optional] +**facet_strategy** | Option<**String**> | Choose the underlying faceting strategy used. Comma separated string of allowed values: exhaustive, top_values or automatic (default). | [optional] +**filter_by** | Option<**String**> | Filter conditions for refining your search results. Separate multiple conditions with &&. | [optional] +**filter_curated_hits** | Option<**bool**> | Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false | [optional] +**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specifying one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] +**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] +**group_missing_values** | Option<**bool**> | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true | [optional] +**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`.
| [optional] +**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] +**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] +**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] +**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] +**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] +**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] **infix** | Option<**String**> | If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results | [optional] +**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] **max_extra_prefix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] **max_extra_suffix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] -**filter_by** | Option<**String**> | Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. | [optional] -**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] -**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] **max_facet_values** | Option<**i32**> | Maximum number of facet values to be returned. | [optional] -**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] +**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. 
| [optional] +**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] **num_typos** | Option<**String**> | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 | [optional] +**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] +**override_tags** | Option<**String**> | Comma separated list of tags to trigger the curations rules that match the tags. | [optional] **page** | Option<**i32**> | Results from this specific page number would be fetched. | [optional] **per_page** | Option<**i32**> | Number of results to fetch per page. Default: 10 | [optional] -**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] -**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] -**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] -**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] -**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] -**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] -**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] -**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] -**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] -**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] +**pinned_hits** | Option<**String**> | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] +**pre_segmented_query** | Option<**bool**> | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same | [optional][default to false] +**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. 
| [optional] +**preset** | Option<**String**> | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. | [optional] +**prioritize_exact_match** | Option<**bool**> | Set this parameter to true to ensure that an exact match is ranked above the others | [optional][default to true] +**prioritize_num_matching_fields** | Option<**bool**> | Make Typesense prioritize documents where the query words appear in more number of fields. | [optional][default to true] +**prioritize_token_position** | Option<**bool**> | Make Typesense prioritize documents where the query words appear earlier in the text. | [optional][default to false] +**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] +**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] +**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] +**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] +**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] +**search_cutoff_ms** | Option<**i32**> | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. | [optional] **snippet_threshold** | Option<**i32**> | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 | [optional] -**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] +**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] +**stopwords** | Option<**String**> | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. | [optional] +**synonym_num_typos** | Option<**i32**> | Allow synonym resolution on typo-corrected words in the query. Default: 0 | [optional] +**synonym_prefix** | Option<**bool**> | Allow synonym resolution on word prefixes in the query. Default: false | [optional] +**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] **typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. 
Default: 100 | [optional] -**pinned_hits** | Option<**String**> | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] -**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] -**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] -**pre_segmented_query** | Option<**bool**> | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same | [optional] -**preset** | Option<**String**> | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. | [optional] -**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional] -**prioritize_exact_match** | Option<**bool**> | Set this parameter to true to ensure that an exact match is ranked above the others | [optional] -**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] -**search_cutoff_ms** | Option<**i32**> | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. | [optional] **use_cache** | Option<**bool**> | Enable server side caching of search query results. By default, caching is disabled. | [optional] -**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] -**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] -**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] **vector_query** | Option<**String**> | Vector query expression for fetching documents \"closest\" to a given query/document vector. | [optional] -**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] -**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] -**collection** | **String** | The collection to search in. 
| +**voice_query** | Option<**String**> | The base64 encoded audio file in 16 khz 16-bit WAV format. | [optional] +**collection** | Option<**String**> | The collection to search in. | [optional] +**rerank_hybrid_matches** | Option<**bool**> | When true, computes both text match and vector distance scores for all matches in hybrid search. Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. | [optional][default to false] +**x_typesense_api_key** | Option<**String**> | A separate search API key for each search within a multi_search request | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/MultiSearchParameters.md b/typesense_codegen/docs/MultiSearchParameters.md index 20a33a7..7f569f4 100644 --- a/typesense_codegen/docs/MultiSearchParameters.md +++ b/typesense_codegen/docs/MultiSearchParameters.md @@ -4,51 +4,69 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] -**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] -**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] -**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] -**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. | [optional] +**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] +**conversation** | Option<**bool**> | Enable conversational search. | [optional] +**conversation_id** | Option<**String**> | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. | [optional] +**conversation_model_id** | Option<**String**> | The Id of Conversation Model to be used. | [optional] +**drop_tokens_mode** | Option<[**models::DropTokensMode**](DropTokensMode.md)> | | [optional] +**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. 
Default: 10 | [optional] +**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional][default to false] +**enable_synonyms** | Option<**bool**> | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true | [optional] +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. | [optional] +**enable_typos_for_numerical_tokens** | Option<**bool**> | Make Typesense disable typos for numerical tokens. | [optional][default to true] +**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] +**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] +**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] +**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] +**facet_return_parent** | Option<**String**> | Comma separated string of nested facet fields whose parent object should be returned in facet response. | [optional] +**facet_strategy** | Option<**String**> | Choose the underlying faceting strategy used. Comma separated string of allowed values: exhaustive, top_values or automatic (default). | [optional] +**filter_by** | Option<**String**> | Filter conditions for refining your search results. Separate multiple conditions with &&. | [optional] +**filter_curated_hits** | Option<**bool**> | Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false | [optional] +**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specifying one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] +**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] +**group_missing_values** | Option<**bool**> | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true | [optional] +**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`.
| [optional] +**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] +**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] +**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] +**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] +**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] +**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] **infix** | Option<**String**> | If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results | [optional] +**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] **max_extra_prefix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] **max_extra_suffix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] -**filter_by** | Option<**String**> | Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. | [optional] -**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] -**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] **max_facet_values** | Option<**i32**> | Maximum number of facet values to be returned. | [optional] -**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] +**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. 
| [optional] +**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] **num_typos** | Option<**String**> | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 | [optional] +**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] +**override_tags** | Option<**String**> | Comma separated list of tags to trigger the curations rules that match the tags. | [optional] **page** | Option<**i32**> | Results from this specific page number would be fetched. | [optional] **per_page** | Option<**i32**> | Number of results to fetch per page. Default: 10 | [optional] -**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] -**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] -**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] -**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] -**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] -**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] -**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] -**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] -**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] -**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] +**pinned_hits** | Option<**String**> | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] +**pre_segmented_query** | Option<**bool**> | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same | [optional][default to false] +**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. 
| [optional] +**preset** | Option<**String**> | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. | [optional] +**prioritize_exact_match** | Option<**bool**> | Set this parameter to true to ensure that an exact match is ranked above the others | [optional][default to true] +**prioritize_num_matching_fields** | Option<**bool**> | Make Typesense prioritize documents where the query words appear in more number of fields. | [optional][default to true] +**prioritize_token_position** | Option<**bool**> | Make Typesense prioritize documents where the query words appear earlier in the text. | [optional][default to false] +**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] +**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] +**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] +**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] +**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] +**search_cutoff_ms** | Option<**i32**> | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. | [optional] **snippet_threshold** | Option<**i32**> | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 | [optional] -**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] +**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] +**stopwords** | Option<**String**> | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. | [optional] +**synonym_num_typos** | Option<**i32**> | Allow synonym resolution on typo-corrected words in the query. Default: 0 | [optional] +**synonym_prefix** | Option<**bool**> | Allow synonym resolution on word prefixes in the query. Default: false | [optional] +**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] **typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. 
Default: 100 | [optional] -**pinned_hits** | Option<**String**> | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] -**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] -**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] -**pre_segmented_query** | Option<**bool**> | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same | [optional] -**preset** | Option<**String**> | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. | [optional] -**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional] -**prioritize_exact_match** | Option<**bool**> | Set this parameter to true to ensure that an exact match is ranked above the others | [optional] -**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] -**search_cutoff_ms** | Option<**i32**> | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. | [optional] **use_cache** | Option<**bool**> | Enable server side caching of search query results. By default, caching is disabled. | [optional] -**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] -**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] -**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] **vector_query** | Option<**String**> | Vector query expression for fetching documents \"closest\" to a given query/document vector. | [optional] -**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] -**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. 
| [optional] +**voice_query** | Option<**String**> | The base64 encoded audio file in 16 khz 16-bit WAV format. | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/MultiSearchResult.md b/typesense_codegen/docs/MultiSearchResult.md index d9f399c..ae61e32 100644 --- a/typesense_codegen/docs/MultiSearchResult.md +++ b/typesense_codegen/docs/MultiSearchResult.md @@ -4,7 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**results** | [**Vec**](SearchResult.md) | | +**conversation** | Option<[**models::SearchResultConversation**](SearchResultConversation.md)> | | [optional] +**results** | [**Vec**](MultiSearchResultItem.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/MultiSearchResultItem.md b/typesense_codegen/docs/MultiSearchResultItem.md new file mode 100644 index 0000000..2ba4735 --- /dev/null +++ b/typesense_codegen/docs/MultiSearchResultItem.md @@ -0,0 +1,23 @@ +# MultiSearchResultItem + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**conversation** | Option<[**models::SearchResultConversation**](SearchResultConversation.md)> | | [optional] +**facet_counts** | Option<[**Vec**](FacetCounts.md)> | | [optional] +**found** | Option<**i32**> | The number of documents found | [optional] +**found_docs** | Option<**i32**> | | [optional] +**grouped_hits** | Option<[**Vec**](SearchGroupedHit.md)> | | [optional] +**hits** | Option<[**Vec**](SearchResultHit.md)> | The documents that matched the search query | [optional] +**out_of** | Option<**i32**> | The total number of documents in the collection | [optional] +**page** | Option<**i32**> | The search result page number | [optional] +**request_params** | Option<[**models::SearchResultRequestParams**](SearchResult_request_params.md)> | | [optional] +**search_cutoff** | Option<**bool**> | Whether the search was cut off | [optional] +**search_time_ms** | Option<**i32**> | The number of milliseconds the search took | [optional] +**code** | Option<**i64**> | HTTP error code | [optional] +**error** | Option<**String**> | Error description | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/MultiSearchSearchesParameter.md b/typesense_codegen/docs/MultiSearchSearchesParameter.md index aee8e7f..3b34942 100644 --- a/typesense_codegen/docs/MultiSearchSearchesParameter.md +++ b/typesense_codegen/docs/MultiSearchSearchesParameter.md @@ -4,7 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**searches** | [**Vec**](MultiSearchCollectionParameters.md) | | +**searches** | [**Vec**](MultiSearchCollectionParameters.md) | | +**union** | Option<**bool**> | When true, merges the search results from each search query into a single ordered set of hits. 
| [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/OperationsApi.md b/typesense_codegen/docs/OperationsApi.md index 9a499de..c8684fa 100644 --- a/typesense_codegen/docs/OperationsApi.md +++ b/typesense_codegen/docs/OperationsApi.md @@ -4,14 +4,98 @@ All URIs are relative to *http://localhost* Method | HTTP request | Description ------------- | ------------- | ------------- +[**get_schema_changes**](OperationsApi.md#get_schema_changes) | **GET** /operations/schema_changes | Get the status of in-progress schema change operations +[**retrieve_api_stats**](OperationsApi.md#retrieve_api_stats) | **GET** /stats.json | Get stats about API endpoints. +[**retrieve_metrics**](OperationsApi.md#retrieve_metrics) | **GET** /metrics.json | Get current RAM, CPU, Disk & Network usage metrics. [**take_snapshot**](OperationsApi.md#take_snapshot) | **POST** /operations/snapshot | Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. [**vote**](OperationsApi.md#vote) | **POST** /operations/vote | Triggers a follower node to initiate the raft voting process, which triggers leader re-election. +## get_schema_changes + +> Vec get_schema_changes() +Get the status of in-progress schema change operations + +Returns the status of any ongoing schema change operations. If no schema changes are in progress, returns an empty response. + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +[**Vec**](SchemaChangeStatus.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_api_stats + +> models::ApiStatsResponse retrieve_api_stats() +Get stats about API endpoints. + +Retrieve the stats about API endpoints. + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +[**models::ApiStatsResponse**](APIStatsResponse.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_metrics + +> serde_json::Value retrieve_metrics() +Get current RAM, CPU, Disk & Network usage metrics. + +Retrieve the metrics. + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +[**serde_json::Value**](serde_json::Value.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + ## take_snapshot -> crate::models::SuccessStatus take_snapshot(snapshot_path) +> models::SuccessStatus take_snapshot(snapshot_path) Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. 
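The three read-only endpoints added to OperationsApi above can be exercised with a short sketch like the following. It assumes the usual leading `&Configuration` argument of the generated reqwest client; the printed output is illustrative only.

```rust
use typesense_codegen::apis::{configuration::Configuration, operations_api};

// Sketch only: each call takes no parameters beyond the (assumed) configuration argument.
async fn node_overview(config: &Configuration) {
    if let Ok(changes) = operations_api::get_schema_changes(config).await {
        println!("in-progress schema changes: {}", changes.len());   // Vec<models::SchemaChangeStatus>
    }
    if let Ok(stats) = operations_api::retrieve_api_stats(config).await {
        println!("{stats:?}");                                       // models::ApiStatsResponse
    }
    if let Ok(metrics) = operations_api::retrieve_metrics(config).await {
        println!("{metrics}");                                       // serde_json::Value with RAM/CPU/disk/network figures
    }
}
```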
Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. You can then backup the snapshot directory that gets created and later restore it as a data directory, as needed. @@ -25,7 +109,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::SuccessStatus**](SuccessStatus.md) +[**models::SuccessStatus**](SuccessStatus.md) ### Authorization @@ -41,7 +125,7 @@ Name | Type | Description | Required | Notes ## vote -> crate::models::SuccessStatus vote() +> models::SuccessStatus vote() Triggers a follower node to initiate the raft voting process, which triggers leader re-election. Triggers a follower node to initiate the raft voting process, which triggers leader re-election. The follower node that you run this operation against will become the new leader, once this command succeeds. @@ -52,7 +136,7 @@ This endpoint does not need any parameter. ### Return type -[**crate::models::SuccessStatus**](SuccessStatus.md) +[**models::SuccessStatus**](SuccessStatus.md) ### Authorization diff --git a/typesense_codegen/docs/OverrideApi.md b/typesense_codegen/docs/OverrideApi.md index 02f1ca5..0cd9129 100644 --- a/typesense_codegen/docs/OverrideApi.md +++ b/typesense_codegen/docs/OverrideApi.md @@ -10,7 +10,7 @@ Method | HTTP request | Description ## get_search_override -> crate::models::SearchOverride get_search_override(collection_name, override_id) +> models::SearchOverride get_search_override(collection_name, override_id) Retrieve a single search override Retrieve the details of a search override, given its id. @@ -25,7 +25,7 @@ Name | Type | Description | Required | Notes ### Return type -[**crate::models::SearchOverride**](SearchOverride.md) +[**models::SearchOverride**](SearchOverride.md) ### Authorization diff --git a/typesense_codegen/docs/PresetDeleteSchema.md b/typesense_codegen/docs/PresetDeleteSchema.md new file mode 100644 index 0000000..36bbacf --- /dev/null +++ b/typesense_codegen/docs/PresetDeleteSchema.md @@ -0,0 +1,11 @@ +# PresetDeleteSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **String** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/PresetSchema.md b/typesense_codegen/docs/PresetSchema.md new file mode 100644 index 0000000..e1457b9 --- /dev/null +++ b/typesense_codegen/docs/PresetSchema.md @@ -0,0 +1,12 @@ +# PresetSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | [**models::PresetUpsertSchemaValue**](PresetUpsertSchema_value.md) | | +**name** | **String** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/PresetUpsertSchema.md b/typesense_codegen/docs/PresetUpsertSchema.md new file mode 100644 index 0000000..ac9f9cd --- /dev/null +++ b/typesense_codegen/docs/PresetUpsertSchema.md @@ -0,0 +1,11 @@ +# PresetUpsertSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | [**models::PresetUpsertSchemaValue**](PresetUpsertSchema_value.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/PresetUpsertSchemaValue.md b/typesense_codegen/docs/PresetUpsertSchemaValue.md new file mode 100644 index 0000000..073a07c --- /dev/null +++ b/typesense_codegen/docs/PresetUpsertSchemaValue.md @@ -0,0 +1,12 @@ +# PresetUpsertSchemaValue + +## Enum Variants + +| Name | Description | +|---- | -----| +| MultiSearchSearchesParameter | | +| SearchParameters | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/PresetsApi.md b/typesense_codegen/docs/PresetsApi.md new file mode 100644 index 0000000..52b9f60 --- /dev/null +++ b/typesense_codegen/docs/PresetsApi.md @@ -0,0 +1,130 @@ +# \PresetsApi + +All URIs are relative to *http://localhost* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**delete_preset**](PresetsApi.md#delete_preset) | **DELETE** /presets/{presetId} | Delete a preset. +[**retrieve_all_presets**](PresetsApi.md#retrieve_all_presets) | **GET** /presets | Retrieves all presets. +[**retrieve_preset**](PresetsApi.md#retrieve_preset) | **GET** /presets/{presetId} | Retrieves a preset. +[**upsert_preset**](PresetsApi.md#upsert_preset) | **PUT** /presets/{presetId} | Upserts a preset. + + + +## delete_preset + +> models::PresetDeleteSchema delete_preset(preset_id) +Delete a preset. + +Permanently deletes a preset, given it's name. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**preset_id** | **String** | The ID of the preset to delete. | [required] | + +### Return type + +[**models::PresetDeleteSchema**](PresetDeleteSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_all_presets + +> models::PresetsRetrieveSchema retrieve_all_presets() +Retrieves all presets. + +Retrieve the details of all presets + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +[**models::PresetsRetrieveSchema**](PresetsRetrieveSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_preset + +> models::PresetSchema retrieve_preset(preset_id) +Retrieves a preset. + +Retrieve the details of a preset, given it's name. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**preset_id** | **String** | The ID of the preset to retrieve. 
| [required] | + +### Return type + +[**models::PresetSchema**](PresetSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## upsert_preset + +> models::PresetSchema upsert_preset(preset_id, preset_upsert_schema) +Upserts a preset. + +Create or update an existing preset. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**preset_id** | **String** | The name of the preset to upsert. | [required] | +**preset_upsert_schema** | [**PresetUpsertSchema**](PresetUpsertSchema.md) | The preset to upsert. | [required] | + +### Return type + +[**models::PresetSchema**](PresetSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/typesense_codegen/docs/PresetsRetrieveSchema.md b/typesense_codegen/docs/PresetsRetrieveSchema.md new file mode 100644 index 0000000..6c2ace3 --- /dev/null +++ b/typesense_codegen/docs/PresetsRetrieveSchema.md @@ -0,0 +1,11 @@ +# PresetsRetrieveSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**presets** | [**Vec**](PresetSchema.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/SchemaChangeStatus.md b/typesense_codegen/docs/SchemaChangeStatus.md new file mode 100644 index 0000000..60ec4dd --- /dev/null +++ b/typesense_codegen/docs/SchemaChangeStatus.md @@ -0,0 +1,13 @@ +# SchemaChangeStatus + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**altered_docs** | Option<**i32**> | Number of documents that have been altered | [optional] +**collection** | Option<**String**> | Name of the collection being modified | [optional] +**validated_docs** | Option<**i32**> | Number of documents that have been validated | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/ScopedKeyParameters.md b/typesense_codegen/docs/ScopedKeyParameters.md index e29c05a..791d83b 100644 --- a/typesense_codegen/docs/ScopedKeyParameters.md +++ b/typesense_codegen/docs/ScopedKeyParameters.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**filter_by** | Option<**String**> | | [optional] **expires_at** | Option<**i64**> | | [optional] +**filter_by** | Option<**String**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchGroupedHit.md b/typesense_codegen/docs/SearchGroupedHit.md index
996a423..1972f65 100644 --- a/typesense_codegen/docs/SearchGroupedHit.md +++ b/typesense_codegen/docs/SearchGroupedHit.md @@ -6,7 +6,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **found** | Option<**i32**> | | [optional] **group_key** | [**Vec**](serde_json::Value.md) | | -**hits** | [**Vec**](SearchResultHit.md) | The documents that matched the search query | +**hits** | [**Vec**](SearchResultHit.md) | The documents that matched the search query | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchHighlight.md b/typesense_codegen/docs/SearchHighlight.md index 7ae7a33..7b2b5a7 100644 --- a/typesense_codegen/docs/SearchHighlight.md +++ b/typesense_codegen/docs/SearchHighlight.md @@ -5,12 +5,12 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **field** | Option<**String**> | | [optional] +**indices** | Option<**Vec**> | The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field | [optional] +**matched_tokens** | Option<[**Vec**](serde_json::Value.md)> | | [optional] **snippet** | Option<**String**> | Present only for (non-array) string fields | [optional] **snippets** | Option<**Vec**> | Present only for (array) string[] fields | [optional] **value** | Option<**String**> | Full field value with highlighting, present only for (non-array) string fields | [optional] **values** | Option<**Vec**> | Full field value with highlighting, present only for (array) string[] fields | [optional] -**indices** | Option<**Vec**> | The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field | [optional] -**matched_tokens** | Option<[**Vec**](serde_json::Value.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchOverride.md b/typesense_codegen/docs/SearchOverride.md index 0c0cbfc..356631c 100644 --- a/typesense_codegen/docs/SearchOverride.md +++ b/typesense_codegen/docs/SearchOverride.md @@ -4,11 +4,18 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**rule** | [**crate::models::SearchOverrideRule**](SearchOverrideRule.md) | | -**includes** | Option<[**Vec**](SearchOverrideInclude.md)> | List of document `id`s that should be included in the search results with their corresponding `position`s. | [optional] -**excludes** | Option<[**Vec**](SearchOverrideExclude.md)> | List of document `id`s that should be excluded from the search results. | [optional] +**effective_from_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. | [optional] +**effective_to_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. | [optional] +**excludes** | Option<[**Vec**](SearchOverrideExclude.md)> | List of document `id`s that should be excluded from the search results. 
| [optional] **filter_by** | Option<**String**> | A filter by clause that is applied to any search query that matches the override rule. | [optional] +**filter_curated_hits** | Option<**bool**> | When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. | [optional] +**includes** | Option<[**Vec**](SearchOverrideInclude.md)> | List of document `id`s that should be included in the search results with their corresponding `position`s. | [optional] +**metadata** | Option<[**serde_json::Value**](.md)> | Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. | [optional] **remove_matched_tokens** | Option<**bool**> | Indicates whether search query tokens that exist in the override's rule should be removed from the search query. | [optional] +**replace_query** | Option<**String**> | Replaces the current search query with this value, when the search query matches the override rule. | [optional] +**rule** | [**models::SearchOverrideRule**](SearchOverrideRule.md) | | +**sort_by** | Option<**String**> | A sort by clause that is applied to any search query that matches the override rule. | [optional] +**stop_processing** | Option<**bool**> | When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. | [optional] **id** | **String** | | [readonly] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchOverrideDeleteResponse.md b/typesense_codegen/docs/SearchOverrideDeleteResponse.md new file mode 100644 index 0000000..3392bd0 --- /dev/null +++ b/typesense_codegen/docs/SearchOverrideDeleteResponse.md @@ -0,0 +1,11 @@ +# SearchOverrideDeleteResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **String** | The id of the override that was deleted | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/SearchOverrideRule.md b/typesense_codegen/docs/SearchOverrideRule.md index f4ac180..8af3a3d 100644 --- a/typesense_codegen/docs/SearchOverrideRule.md +++ b/typesense_codegen/docs/SearchOverrideRule.md @@ -4,8 +4,10 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**query** | **String** | Indicates what search queries should be overridden | -**r#match** | **String** | Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. | +**filter_by** | Option<**String**> | Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). | [optional] +**r#match** | Option<**String**> | Indicates whether the match on the query term should be `exact` or `contains`. 
If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. | [optional] +**query** | Option<**String**> | Indicates what search queries should be overridden | [optional] +**tags** | Option<**Vec**> | List of tag values to associate with this override rule. | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchOverrideSchema.md b/typesense_codegen/docs/SearchOverrideSchema.md index 6aaad22..b941538 100644 --- a/typesense_codegen/docs/SearchOverrideSchema.md +++ b/typesense_codegen/docs/SearchOverrideSchema.md @@ -4,11 +4,18 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**rule** | [**crate::models::SearchOverrideRule**](SearchOverrideRule.md) | | -**includes** | Option<[**Vec**](SearchOverrideInclude.md)> | List of document `id`s that should be included in the search results with their corresponding `position`s. | [optional] -**excludes** | Option<[**Vec**](SearchOverrideExclude.md)> | List of document `id`s that should be excluded from the search results. | [optional] +**effective_from_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. | [optional] +**effective_to_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. | [optional] +**excludes** | Option<[**Vec**](SearchOverrideExclude.md)> | List of document `id`s that should be excluded from the search results. | [optional] **filter_by** | Option<**String**> | A filter by clause that is applied to any search query that matches the override rule. | [optional] +**filter_curated_hits** | Option<**bool**> | When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. | [optional] +**includes** | Option<[**Vec**](SearchOverrideInclude.md)> | List of document `id`s that should be included in the search results with their corresponding `position`s. | [optional] +**metadata** | Option<[**serde_json::Value**](.md)> | Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. | [optional] **remove_matched_tokens** | Option<**bool**> | Indicates whether search query tokens that exist in the override's rule should be removed from the search query. | [optional] +**replace_query** | Option<**String**> | Replaces the current search query with this value, when the search query matches the override rule. | [optional] +**rule** | [**models::SearchOverrideRule**](SearchOverrideRule.md) | | +**sort_by** | Option<**String**> | A sort by clause that is applied to any search query that matches the override rule. | [optional] +**stop_processing** | Option<**bool**> | When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. 
| [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchOverridesResponse.md b/typesense_codegen/docs/SearchOverridesResponse.md index 422e1b3..4c3952b 100644 --- a/typesense_codegen/docs/SearchOverridesResponse.md +++ b/typesense_codegen/docs/SearchOverridesResponse.md @@ -4,7 +4,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**overrides** | [**Vec**](SearchOverride.md) | | +**overrides** | [**Vec**](SearchOverride.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchParameters.md b/typesense_codegen/docs/SearchParameters.md index 740eb30..a27ef45 100644 --- a/typesense_codegen/docs/SearchParameters.md +++ b/typesense_codegen/docs/SearchParameters.md @@ -4,55 +4,73 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**q** | **String** | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | -**query_by** | **String** | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | -**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] -**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] -**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. | [optional] +**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] +**conversation** | Option<**bool**> | Enable conversational search. | [optional] +**conversation_id** | Option<**String**> | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. | [optional] +**conversation_model_id** | Option<**String**> | The Id of Conversation Model to be used. | [optional] +**drop_tokens_mode** | Option<[**models::DropTokensMode**](DropTokensMode.md)> | | [optional] +**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] +**enable_highlight_v1** | Option<**bool**> | Flag for enabling/disabling the deprecated, old highlight structure in the response. 
Default: true | [optional][default to true] +**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional][default to false] +**enable_synonyms** | Option<**bool**> | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true | [optional] +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. | [optional] +**enable_typos_for_numerical_tokens** | Option<**bool**> | Make Typesense disable typos for numerical tokens. | [optional][default to true] +**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] +**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] +**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] +**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] +**facet_return_parent** | Option<**String**> | Comma separated string of nested facet fields whose parent object should be returned in facet response. | [optional] +**facet_strategy** | Option<**String**> | Choose the underlying faceting strategy used. Comma separated string of allowed values: exhaustive, top_values or automatic (default). | [optional] +**filter_by** | Option<**String**> | Filter conditions for refining your search results. Separate multiple conditions with &&. | [optional] +**filter_curated_hits** | Option<**bool**> | Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false | [optional] +**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specifying one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] +**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] +**group_missing_values** | Option<**bool**> | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true | [optional] +**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. 
| [optional] +**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] +**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] +**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] +**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] +**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] +**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] **infix** | Option<**String**> | If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results | [optional] +**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] +**max_candidates** | Option<**i32**> | Control the number of words that Typesense considers for typo and prefix searching. | [optional] **max_extra_prefix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] **max_extra_suffix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] -**filter_by** | Option<**String**> | Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. | [optional] -**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] -**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] **max_facet_values** | Option<**i32**> | Maximum number of facet values to be returned. | [optional] -**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". 
| [optional] +**max_filter_by_candidates** | Option<**i32**> | Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. | [optional] +**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] +**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] **num_typos** | Option<**String**> | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 | [optional] +**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] +**override_tags** | Option<**String**> | Comma separated list of tags to trigger the curations rules that match the tags. | [optional] **page** | Option<**i32**> | Results from this specific page number would be fetched. | [optional] **per_page** | Option<**i32**> | Number of results to fetch per page. Default: 10 | [optional] -**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] -**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] -**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] -**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] -**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] -**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] -**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] -**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] -**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] -**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] -**enable_highlight_v1** | Option<**bool**> | Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true | [optional][default to true] +**pinned_hits** | Option<**String**> | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. 
| [optional] +**pre_segmented_query** | Option<**bool**> | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same | [optional] +**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. | [optional] +**preset** | Option<**String**> | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. | [optional] +**prioritize_exact_match** | Option<**bool**> | Set this parameter to true to ensure that an exact match is ranked above the others | [optional][default to true] +**prioritize_num_matching_fields** | Option<**bool**> | Make Typesense prioritize documents where the query words appear in more number of fields. | [optional][default to true] +**prioritize_token_position** | Option<**bool**> | Make Typesense prioritize documents where the query words appear earlier in the text. | [optional][default to false] +**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] +**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] +**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] +**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] +**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] +**search_cutoff_ms** | Option<**i32**> | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. | [optional] **snippet_threshold** | Option<**i32**> | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 | [optional] -**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] -**typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 | [optional] -**pinned_hits** | Option<**String**> | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. 
Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] -**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] -**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] +**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] **split_join_tokens** | Option<**String**> | Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. | [optional] -**pre_segmented_query** | Option<**bool**> | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same | [optional] -**preset** | Option<**String**> | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. | [optional] -**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional] -**prioritize_exact_match** | Option<**bool**> | Set this parameter to true to ensure that an exact match is ranked above the others | [optional] -**max_candidates** | Option<**i32**> | Control the number of words that Typesense considers for typo and prefix searching. | [optional] -**prioritize_token_position** | Option<**bool**> | Make Typesense prioritize documents where the query words appear earlier in the text. | [optional] -**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] -**search_cutoff_ms** | Option<**i32**> | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. | [optional] +**stopwords** | Option<**String**> | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. | [optional] +**synonym_num_typos** | Option<**i32**> | Allow synonym resolution on typo-corrected words in the query. Default: 0 | [optional] +**synonym_prefix** | Option<**bool**> | Allow synonym resolution on word prefixes in the query. 
Default: false | [optional] +**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] +**typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 | [optional] **use_cache** | Option<**bool**> | Enable server side caching of search query results. By default, caching is disabled. | [optional] -**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] -**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] -**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] **vector_query** | Option<**String**> | Vector query expression for fetching documents \"closest\" to a given query/document vector. | [optional] -**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] -**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] +**voice_query** | Option<**String**> | The base64 encoded audio file in 16 khz 16-bit WAV format. | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchResult.md b/typesense_codegen/docs/SearchResult.md index 857f5ea..4c5aa58 100644 --- a/typesense_codegen/docs/SearchResult.md +++ b/typesense_codegen/docs/SearchResult.md @@ -4,15 +4,17 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**facet_counts** | Option<[**Vec**](FacetCounts.md)> | | [optional] +**conversation** | Option<[**models::SearchResultConversation**](SearchResultConversation.md)> | | [optional] +**facet_counts** | Option<[**Vec**](FacetCounts.md)> | | [optional] **found** | Option<**i32**> | The number of documents found | [optional] -**search_time_ms** | Option<**i32**> | The number of milliseconds the search took | [optional] +**found_docs** | Option<**i32**> | | [optional] +**grouped_hits** | Option<[**Vec**](SearchGroupedHit.md)> | | [optional] +**hits** | Option<[**Vec**](SearchResultHit.md)> | The documents that matched the search query | [optional] **out_of** | Option<**i32**> | The total number of documents in the collection | [optional] -**search_cutoff** | Option<**bool**> | Whether the search was cut off | [optional] **page** | Option<**i32**> | The search result page number | [optional] -**grouped_hits** | Option<[**Vec**](SearchGroupedHit.md)> | | [optional] -**hits** | Option<[**Vec**](SearchResultHit.md)> | The documents that matched the search query | [optional] -**request_params** | Option<[**crate::models::SearchResultRequestParams**](SearchResult_request_params.md)> | | [optional] +**request_params** | Option<[**models::SearchResultRequestParams**](SearchResult_request_params.md)> | | [optional] +**search_cutoff** | Option<**bool**> | Whether the search 
was cut off | [optional] +**search_time_ms** | Option<**i32**> | The number of milliseconds the search took | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchResultConversation.md b/typesense_codegen/docs/SearchResultConversation.md new file mode 100644 index 0000000..2428ba7 --- /dev/null +++ b/typesense_codegen/docs/SearchResultConversation.md @@ -0,0 +1,14 @@ +# SearchResultConversation + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**answer** | **String** | | +**conversation_history** | [**Vec**](serde_json::Value.md) | | +**conversation_id** | **String** | | +**query** | **String** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/SearchResultHit.md b/typesense_codegen/docs/SearchResultHit.md index 3b9d460..9af8364 100644 --- a/typesense_codegen/docs/SearchResultHit.md +++ b/typesense_codegen/docs/SearchResultHit.md @@ -4,11 +4,12 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**highlights** | Option<[**Vec**](SearchHighlight.md)> | (Deprecated) Contains highlighted portions of the search fields | [optional] -**highlight** | Option<[**::std::collections::HashMap**](serde_json::Value.md)> | Highlighted version of the matching document | [optional] -**document** | Option<[**::std::collections::HashMap**](serde_json::Value.md)> | Can be any key-value pair | [optional] +**document** | Option<[**serde_json::Value**](.md)> | Can be any key-value pair | [optional] +**geo_distance_meters** | Option<**std::collections::HashMap**> | Can be any key-value pair | [optional] +**highlight** | Option<[**std::collections::HashMap**](serde_json::Value.md)> | Highlighted version of the matching document | [optional] +**highlights** | Option<[**Vec**](SearchHighlight.md)> | (Deprecated) Contains highlighted portions of the search fields | [optional] **text_match** | Option<**i64**> | | [optional] -**geo_distance_meters** | Option<**::std::collections::HashMap**> | Can be any key-value pair | [optional] +**text_match_info** | Option<[**models::SearchResultHitTextMatchInfo**](SearchResultHit_text_match_info.md)> | | [optional] **vector_distance** | Option<**f32**> | Distance between the query vector and matching document's vector value | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchResultHitTextMatchInfo.md b/typesense_codegen/docs/SearchResultHitTextMatchInfo.md new file mode 100644 index 0000000..21faffa --- /dev/null +++ b/typesense_codegen/docs/SearchResultHitTextMatchInfo.md @@ -0,0 +1,17 @@ +# SearchResultHitTextMatchInfo + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**best_field_score** | Option<**String**> | | [optional] +**best_field_weight** | Option<**i32**> | | [optional] +**fields_matched** | Option<**i32**> | | [optional] +**num_tokens_dropped** | Option<**i64**> | | [optional] +**score** | Option<**String**> | | [optional] +**tokens_matched** | Option<**i32**> | | [optional] +**typo_prefix_score** | Option<**i32**> 
| | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/SearchResultRequestParams.md b/typesense_codegen/docs/SearchResultRequestParams.md index 2ba855f..299f95e 100644 --- a/typesense_codegen/docs/SearchResultRequestParams.md +++ b/typesense_codegen/docs/SearchResultRequestParams.md @@ -5,8 +5,9 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **collection_name** | **String** | | -**q** | **String** | | **per_page** | **i32** | | +**q** | **String** | | +**voice_query** | Option<[**models::SearchResultRequestParamsVoiceQuery**](SearchResult_request_params_voice_query.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchResultRequestParamsVoiceQuery.md b/typesense_codegen/docs/SearchResultRequestParamsVoiceQuery.md new file mode 100644 index 0000000..e7293e7 --- /dev/null +++ b/typesense_codegen/docs/SearchResultRequestParamsVoiceQuery.md @@ -0,0 +1,11 @@ +# SearchResultRequestParamsVoiceQuery + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**transcribed_query** | Option<**String**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/SearchSynonym.md b/typesense_codegen/docs/SearchSynonym.md index 4061c2b..8d2ef72 100644 --- a/typesense_codegen/docs/SearchSynonym.md +++ b/typesense_codegen/docs/SearchSynonym.md @@ -4,7 +4,9 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**locale** | Option<**String**> | Locale for the synonym, leave blank to use the standard tokenizer. | [optional] **root** | Option<**String**> | For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. | [optional] +**symbols_to_index** | Option<**Vec**> | By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. | [optional] **synonyms** | **Vec** | Array of words that should be considered as synonyms. 
| **id** | **String** | | [readonly] diff --git a/typesense_codegen/docs/SearchSynonymDeleteResponse.md b/typesense_codegen/docs/SearchSynonymDeleteResponse.md new file mode 100644 index 0000000..76ab57c --- /dev/null +++ b/typesense_codegen/docs/SearchSynonymDeleteResponse.md @@ -0,0 +1,11 @@ +# SearchSynonymDeleteResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **String** | The id of the synonym that was deleted | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/SearchSynonymSchema.md b/typesense_codegen/docs/SearchSynonymSchema.md index bcb5956..6f305d1 100644 --- a/typesense_codegen/docs/SearchSynonymSchema.md +++ b/typesense_codegen/docs/SearchSynonymSchema.md @@ -4,7 +4,9 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**locale** | Option<**String**> | Locale for the synonym, leave blank to use the standard tokenizer. | [optional] **root** | Option<**String**> | For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. | [optional] +**symbols_to_index** | Option<**Vec**> | By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. | [optional] **synonyms** | **Vec** | Array of words that should be considered as synonyms. | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchSynonymsResponse.md b/typesense_codegen/docs/SearchSynonymsResponse.md index f88f04c..6b946ee 100644 --- a/typesense_codegen/docs/SearchSynonymsResponse.md +++ b/typesense_codegen/docs/SearchSynonymsResponse.md @@ -4,7 +4,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**synonyms** | [**Vec**](SearchSynonym.md) | | +**synonyms** | [**Vec**](SearchSynonym.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/StemmingApi.md b/typesense_codegen/docs/StemmingApi.md new file mode 100644 index 0000000..582bb81 --- /dev/null +++ b/typesense_codegen/docs/StemmingApi.md @@ -0,0 +1,99 @@ +# \StemmingApi + +All URIs are relative to *http://localhost* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**get_stemming_dictionary**](StemmingApi.md#get_stemming_dictionary) | **GET** /stemming/dictionaries/{dictionaryId} | Retrieve a stemming dictionary +[**import_stemming_dictionary**](StemmingApi.md#import_stemming_dictionary) | **POST** /stemming/dictionaries/import | Import a stemming dictionary +[**list_stemming_dictionaries**](StemmingApi.md#list_stemming_dictionaries) | **GET** /stemming/dictionaries | List all stemming dictionaries + + + +## get_stemming_dictionary + +> models::StemmingDictionary get_stemming_dictionary(dictionary_id) +Retrieve a stemming dictionary + +Fetch details of a specific stemming dictionary. 
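The stemming dictionary endpoints above operate on JSONL word mappings, one `{"word": ..., "root": ...}` object per line, matching the fields documented under `StemmingDictionaryWordsInner` later in this patch. A hedged sketch of assembling such a body, assuming the raw JSONL is passed as the `String` body shown for `import_stemming_dictionary` below; treat it as an illustration rather than a verified wire format.

```rust
// Builds a JSONL body of word -> root mappings for a stemming dictionary.
// The {"word": ..., "root": ...} shape mirrors StemmingDictionaryWordsInner.
use serde_json::json;

fn main() {
    // Example mappings only.
    let mappings = [("running", "run"), ("fishes", "fish"), ("better", "good")];

    let jsonl_body: String = mappings
        .iter()
        .map(|(word, root)| json!({ "word": word, "root": root }).to_string())
        .collect::<Vec<_>>()
        .join("\n");

    // This string is what import_stemming_dictionary (documented below) takes as `body`.
    println!("{jsonl_body}");
}
```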
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**dictionary_id** | **String** | The ID of the dictionary to retrieve | [required] | + +### Return type + +[**models::StemmingDictionary**](StemmingDictionary.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## import_stemming_dictionary + +> String import_stemming_dictionary(id, body) +Import a stemming dictionary + +Upload a JSONL file containing word mappings to create or update a stemming dictionary. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**id** | **String** | The ID to assign to the dictionary | [required] | +**body** | **String** | The JSONL file containing word mappings | [required] | + +### Return type + +**String** + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/octet-stream, application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## list_stemming_dictionaries + +> models::ListStemmingDictionaries200Response list_stemming_dictionaries() +List all stemming dictionaries + +Retrieve a list of all available stemming dictionaries. + +### Parameters + +This endpoint does not need any parameter. 
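A hedged call sketch tying the three dictionary endpoints together, with the same caveats as the Operations example earlier: the module path, the `&Configuration` argument, and the exact parameter types (`String` vs `&str`) are assumptions; the function names and return shapes follow the docs in this patch, and the dictionary id is a placeholder.

```rust
// Assumed module path and calling convention (openapi-generator reqwest style);
// function names and return types follow the StemmingApi docs in this patch.
use typesense_codegen::apis::{configuration::Configuration, stemming_api};

async fn sync_dictionary(
    config: &Configuration,
    jsonl_body: String,
) -> Result<(), Box<dyn std::error::Error>> {
    // POST /stemming/dictionaries/import with the raw JSONL body ("demo-dict" is a placeholder id).
    stemming_api::import_stemming_dictionary(config, "demo-dict", jsonl_body).await?;

    // GET /stemming/dictionaries/{dictionaryId}
    let dict = stemming_api::get_stemming_dictionary(config, "demo-dict").await?;
    println!("dictionary {} has {} mappings", dict.id, dict.words.len());

    // GET /stemming/dictionaries
    let all = stemming_api::list_stemming_dictionaries(config).await?;
    println!("{all:?}");

    Ok(())
}
```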
+ +### Return type + +[**models::ListStemmingDictionaries200Response**](listStemmingDictionaries_200_response.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/typesense_codegen/docs/StemmingDictionary.md b/typesense_codegen/docs/StemmingDictionary.md new file mode 100644 index 0000000..4aa3a3e --- /dev/null +++ b/typesense_codegen/docs/StemmingDictionary.md @@ -0,0 +1,12 @@ +# StemmingDictionary + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **String** | Unique identifier for the dictionary | +**words** | [**Vec**](StemmingDictionary_words_inner.md) | List of word mappings in the dictionary | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/StemmingDictionaryWordsInner.md b/typesense_codegen/docs/StemmingDictionaryWordsInner.md new file mode 100644 index 0000000..f9a957d --- /dev/null +++ b/typesense_codegen/docs/StemmingDictionaryWordsInner.md @@ -0,0 +1,12 @@ +# StemmingDictionaryWordsInner + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**root** | **String** | The root form of the word | +**word** | **String** | The word form to be stemmed | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/StopwordsApi.md b/typesense_codegen/docs/StopwordsApi.md new file mode 100644 index 0000000..ec7664f --- /dev/null +++ b/typesense_codegen/docs/StopwordsApi.md @@ -0,0 +1,130 @@ +# \StopwordsApi + +All URIs are relative to *http://localhost* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**delete_stopwords_set**](StopwordsApi.md#delete_stopwords_set) | **DELETE** /stopwords/{setId} | Delete a stopwords set. +[**retrieve_stopwords_set**](StopwordsApi.md#retrieve_stopwords_set) | **GET** /stopwords/{setId} | Retrieves a stopwords set. +[**retrieve_stopwords_sets**](StopwordsApi.md#retrieve_stopwords_sets) | **GET** /stopwords | Retrieves all stopwords sets. +[**upsert_stopwords_set**](StopwordsApi.md#upsert_stopwords_set) | **PUT** /stopwords/{setId} | Upserts a stopwords set. + + + +## delete_stopwords_set + +> models::DeleteStopwordsSet200Response delete_stopwords_set(set_id) +Delete a stopwords set. + +Permanently deletes a stopwords set, given it's name. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**set_id** | **String** | The ID of the stopwords set to delete. 
| [required] | + +### Return type + +[**models::DeleteStopwordsSet200Response**](deleteStopwordsSet_200_response.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_stopwords_set + +> models::StopwordsSetRetrieveSchema retrieve_stopwords_set(set_id) +Retrieves a stopwords set. + +Retrieve the details of a stopwords set, given it's name. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**set_id** | **String** | The ID of the stopwords set to retrieve. | [required] | + +### Return type + +[**models::StopwordsSetRetrieveSchema**](StopwordsSetRetrieveSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_stopwords_sets + +> models::StopwordsSetsRetrieveAllSchema retrieve_stopwords_sets() +Retrieves all stopwords sets. + +Retrieve the details of all stopwords sets + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +[**models::StopwordsSetsRetrieveAllSchema**](StopwordsSetsRetrieveAllSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## upsert_stopwords_set + +> models::StopwordsSetSchema upsert_stopwords_set(set_id, stopwords_set_upsert_schema) +Upserts a stopwords set. + +When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**set_id** | **String** | The ID of the stopwords set to upsert. | [required] | +**stopwords_set_upsert_schema** | [**StopwordsSetUpsertSchema**](StopwordsSetUpsertSchema.md) | The stopwords set to upsert. 
| [required] | + +### Return type + +[**models::StopwordsSetSchema**](StopwordsSetSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/typesense_codegen/docs/StopwordsSetRetrieveSchema.md b/typesense_codegen/docs/StopwordsSetRetrieveSchema.md new file mode 100644 index 0000000..284a9ed --- /dev/null +++ b/typesense_codegen/docs/StopwordsSetRetrieveSchema.md @@ -0,0 +1,11 @@ +# StopwordsSetRetrieveSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**stopwords** | [**models::StopwordsSetSchema**](StopwordsSetSchema.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/StopwordsSetSchema.md b/typesense_codegen/docs/StopwordsSetSchema.md new file mode 100644 index 0000000..637565b --- /dev/null +++ b/typesense_codegen/docs/StopwordsSetSchema.md @@ -0,0 +1,13 @@ +# StopwordsSetSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **String** | | +**locale** | Option<**String**> | | [optional] +**stopwords** | **Vec** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/StopwordsSetUpsertSchema.md b/typesense_codegen/docs/StopwordsSetUpsertSchema.md new file mode 100644 index 0000000..da91c46 --- /dev/null +++ b/typesense_codegen/docs/StopwordsSetUpsertSchema.md @@ -0,0 +1,12 @@ +# StopwordsSetUpsertSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**locale** | Option<**String**> | | [optional] +**stopwords** | **Vec** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/StopwordsSetsRetrieveAllSchema.md b/typesense_codegen/docs/StopwordsSetsRetrieveAllSchema.md new file mode 100644 index 0000000..074531b --- /dev/null +++ b/typesense_codegen/docs/StopwordsSetsRetrieveAllSchema.md @@ -0,0 +1,11 @@ +# StopwordsSetsRetrieveAllSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**stopwords** | [**Vec**](StopwordsSetSchema.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/SynonymsApi.md b/typesense_codegen/docs/SynonymsApi.md new file mode 100644 index 0000000..b09a9dc --- /dev/null +++ b/typesense_codegen/docs/SynonymsApi.md @@ -0,0 +1,132 @@ +# \SynonymsApi + +All URIs are relative to *http://localhost* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**delete_search_synonym**](SynonymsApi.md#delete_search_synonym) | **DELETE** /collections/{collectionName}/synonyms/{synonymId} | Delete a synonym associated with a collection 
+[**get_search_synonym**](SynonymsApi.md#get_search_synonym) | **GET** /collections/{collectionName}/synonyms/{synonymId} | Retrieve a single search synonym +[**get_search_synonyms**](SynonymsApi.md#get_search_synonyms) | **GET** /collections/{collectionName}/synonyms | List all collection synonyms +[**upsert_search_synonym**](SynonymsApi.md#upsert_search_synonym) | **PUT** /collections/{collectionName}/synonyms/{synonymId} | Create or update a synonym + + + +## delete_search_synonym + +> models::SearchSynonymDeleteResponse delete_search_synonym(collection_name, synonym_id) +Delete a synonym associated with a collection + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**collection_name** | **String** | The name of the collection | [required] | +**synonym_id** | **String** | The ID of the search synonym to delete | [required] | + +### Return type + +[**models::SearchSynonymDeleteResponse**](SearchSynonymDeleteResponse.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## get_search_synonym + +> models::SearchSynonym get_search_synonym(collection_name, synonym_id) +Retrieve a single search synonym + +Retrieve the details of a search synonym, given its id. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**collection_name** | **String** | The name of the collection | [required] | +**synonym_id** | **String** | The id of the search synonym | [required] | + +### Return type + +[**models::SearchSynonym**](SearchSynonym.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## get_search_synonyms + +> models::SearchSynonymsResponse get_search_synonyms(collection_name) +List all collection synonyms + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**collection_name** | **String** | The name of the collection | [required] | + +### Return type + +[**models::SearchSynonymsResponse**](SearchSynonymsResponse.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## upsert_search_synonym + +> models::SearchSynonym upsert_search_synonym(collection_name, synonym_id, search_synonym_schema) +Create or update a synonym + +Create or update a synonym to define search terms that should be considered equivalent. 
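+
+A minimal usage sketch (illustrative; the collection name and synonym ID are placeholders, and it assumes the generated `SearchSynonymSchema::new` constructor, which takes only the required `synonyms` list):
+
+```rust
+use typesense_codegen::apis::{configuration::Configuration, synonyms_api};
+use typesense_codegen::models::SearchSynonymSchema;
+
+async fn add_coat_synonyms(config: &Configuration) -> Result<(), Box<dyn std::error::Error>> {
+    // Treat these three terms as equivalent at search time in the `products` collection.
+    let schema = SearchSynonymSchema::new(vec![
+        "blazer".to_owned(),
+        "coat".to_owned(),
+        "jacket".to_owned(),
+    ]);
+    let upserted =
+        synonyms_api::upsert_search_synonym(config, "products", "coat-synonyms", schema).await?;
+    println!("{upserted:?}");
+    Ok(())
+}
+```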
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**collection_name** | **String** | The name of the collection | [required] | +**synonym_id** | **String** | The ID of the search synonym to create/update | [required] | +**search_synonym_schema** | [**SearchSynonymSchema**](SearchSynonymSchema.md) | The search synonym object to be created/updated | [required] | + +### Return type + +[**models::SearchSynonym**](SearchSynonym.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/typesense_codegen/docs/VoiceQueryModelCollectionConfig.md b/typesense_codegen/docs/VoiceQueryModelCollectionConfig.md new file mode 100644 index 0000000..24b0aa9 --- /dev/null +++ b/typesense_codegen/docs/VoiceQueryModelCollectionConfig.md @@ -0,0 +1,11 @@ +# VoiceQueryModelCollectionConfig + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**model_name** | Option<**String**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/src/apis/analytics_api.rs b/typesense_codegen/src/apis/analytics_api.rs index f0c1c7e..827f420 100644 --- a/typesense_codegen/src/apis/analytics_api.rs +++ b/typesense_codegen/src/apis/analytics_api.rs @@ -3,19 +3,31 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ -use super::{configuration, Error}; -use crate::apis::ResponseContent; + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + + +/// struct for typed errors of method [`create_analytics_event`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum CreateAnalyticsEventError { + Status400(models::ApiResponse), + UnknownValue(serde_json::Value), +} /// struct for typed errors of method [`create_analytics_rule`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum CreateAnalyticsRuleError { - Status400(crate::models::ApiResponse), + Status400(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -23,7 +35,7 @@ pub enum CreateAnalyticsRuleError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum DeleteAnalyticsRuleError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -31,7 +43,7 @@ pub enum DeleteAnalyticsRuleError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum RetrieveAnalyticsRuleError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -42,198 +54,284 @@ pub enum RetrieveAnalyticsRulesError { UnknownValue(serde_json::Value), } -/// When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. 
-pub async fn create_analytics_rule( - configuration: &configuration::Configuration, - analytics_rule_schema: crate::models::AnalyticsRuleSchema, -) -> Result> { - let local_var_configuration = configuration; +/// struct for typed errors of method [`upsert_analytics_rule`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum UpsertAnalyticsRuleError { + Status400(models::ApiResponse), + UnknownValue(serde_json::Value), +} + - let local_var_client = &local_var_configuration.client; +/// Sending events for analytics e.g rank search results based on popularity. +pub async fn create_analytics_event(configuration: &configuration::Configuration, analytics_event_create_schema: models::AnalyticsEventCreateSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_analytics_event_create_schema = analytics_event_create_schema; - let local_var_uri_str = format!("{}/analytics/rules", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + let uri_str = format!("{}/analytics/events", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - local_var_req_builder = local_var_req_builder.json(&analytics_rule_schema); + req_builder = req_builder.json(&p_analytics_event_create_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::AnalyticsEventCreateResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::AnalyticsEventCreateResponse`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} - let local_var_req = 
local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; +/// When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. +pub async fn create_analytics_rule(configuration: &configuration::Configuration, analytics_rule_schema: models::AnalyticsRuleSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_analytics_rule_schema = analytics_rule_schema; - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + let uri_str = format!("{}/analytics/rules", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - Err(Error::ResponseError(local_var_error)) + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_analytics_rule_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::AnalyticsRuleSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::AnalyticsRuleSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Permanently deletes an analytics rule, given it's name -pub async fn delete_analytics_rule( - configuration: &configuration::Configuration, - rule_name: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/analytics/rules/{ruleName}", - local_var_configuration.base_path, - ruleName = crate::apis::urlencode(rule_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn 
delete_analytics_rule(configuration: &configuration::Configuration, rule_name: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_rule_name = rule_name; + + let uri_str = format!("{}/analytics/rules/{ruleName}", configuration.base_path, ruleName=crate::apis::urlencode(p_rule_name)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::AnalyticsRuleDeleteResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::AnalyticsRuleDeleteResponse`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Retrieve the details of an analytics rule, given it's name -pub async fn retrieve_analytics_rule( - configuration: &configuration::Configuration, - rule_name: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/analytics/rules/{ruleName}", - local_var_configuration.base_path, - ruleName = crate::apis::urlencode(rule_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref 
local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn retrieve_analytics_rule(configuration: &configuration::Configuration, rule_name: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_rule_name = rule_name; + + let uri_str = format!("{}/analytics/rules/{ruleName}", configuration.base_path, ruleName=crate::apis::urlencode(p_rule_name)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::AnalyticsRuleSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::AnalyticsRuleSchema`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Retrieve the details of all analytics rules -pub async fn retrieve_analytics_rules( - configuration: &configuration::Configuration, -) -> Result> { - let local_var_configuration = configuration; +pub async fn retrieve_analytics_rules(configuration: &configuration::Configuration, ) -> Result> { - let local_var_client = &local_var_configuration.client; + let uri_str = format!("{}/analytics/rules", 
configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - let local_var_uri_str = format!("{}/analytics/rules", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::AnalyticsRulesRetrieveSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::AnalyticsRulesRetrieveSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; +/// Upserts an analytics rule with the given name. 
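+///
+/// A minimal usage sketch, assuming a populated `config: Configuration` and a
+/// pre-built `rule: models::AnalyticsRuleUpsertSchema` describing the rule type
+/// and parameters (the rule name below is illustrative):
+///
+/// ```ignore
+/// let upserted = upsert_analytics_rule(&config, "product_queries_aggregation", rule).await?;
+/// println!("{upserted:?}");
+/// ```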
+pub async fn upsert_analytics_rule(configuration: &configuration::Configuration, rule_name: &str, analytics_rule_upsert_schema: models::AnalyticsRuleUpsertSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_rule_name = rule_name; + let p_analytics_rule_upsert_schema = analytics_rule_upsert_schema; - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, + let uri_str = format!("{}/analytics/rules/{ruleName}", configuration.base_path, ruleName=crate::apis::urlencode(p_rule_name)); + let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - Err(Error::ResponseError(local_var_error)) + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_analytics_rule_upsert_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::AnalyticsRuleSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::AnalyticsRuleSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } + diff --git a/typesense_codegen/src/apis/collections_api.rs b/typesense_codegen/src/apis/collections_api.rs index 0e0f5bf..0c3f7f3 100644 --- a/typesense_codegen/src/apis/collections_api.rs +++ b/typesense_codegen/src/apis/collections_api.rs @@ -3,20 +3,24 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ -use super::{configuration, Error}; -use crate::apis::ResponseContent; + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + /// struct for typed errors of method [`create_collection`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum CreateCollectionError { - Status400(crate::models::ApiResponse), - Status409(crate::models::ApiResponse), + Status400(models::ApiResponse), + Status409(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -24,7 +28,7 @@ pub enum CreateCollectionError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum DeleteAliasError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -32,7 +36,7 @@ pub enum DeleteAliasError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum DeleteCollectionError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -40,7 +44,7 @@ pub enum DeleteCollectionError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum GetAliasError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -55,7 +59,7 @@ pub enum GetAliasesError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum GetCollectionError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -70,8 +74,8 @@ pub enum GetCollectionsError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum UpdateCollectionError { - Status400(crate::models::ApiResponse), - Status404(crate::models::ApiResponse), + Status400(models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -79,455 +83,414 @@ pub enum UpdateCollectionError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum UpsertAliasError { - Status400(crate::models::ApiResponse), - Status404(crate::models::ApiResponse), + Status400(models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } -/// When a collection is created, we give it a name and describe the fields that will be indexed from the documents added to the collection. -pub async fn create_collection( - configuration: &configuration::Configuration, - collection_schema: crate::models::CollectionSchema, -) -> Result> { - let local_var_configuration = configuration; - let local_var_client = &local_var_configuration.client; +/// When a collection is created, we give it a name and describe the fields that will be indexed from the documents added to the collection. 
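+///
+/// A minimal usage sketch, assuming a populated `config: Configuration` and the
+/// generated `new` constructors, which take only each model's required fields
+/// (the collection and field names are illustrative):
+///
+/// ```ignore
+/// use typesense_codegen::models::{CollectionSchema, Field};
+///
+/// let schema = CollectionSchema::new(
+///     "companies".to_owned(),
+///     vec![
+///         Field::new("company_name".to_owned(), "string".to_owned()),
+///         Field::new("num_employees".to_owned(), "int32".to_owned()),
+///     ],
+/// );
+/// let collection = create_collection(&config, schema).await?;
+/// println!("created collection: {}", collection.name);
+/// ```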
+pub async fn create_collection(configuration: &configuration::Configuration, collection_schema: models::CollectionSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_schema = collection_schema; - let local_var_uri_str = format!("{}/collections", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + let uri_str = format!("{}/collections", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - local_var_req_builder = local_var_req_builder.json(&collection_schema); - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + req_builder = req_builder.json(&p_collection_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::CollectionResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::CollectionResponse`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } -pub async fn delete_alias( - configuration: &configuration::Configuration, - alias_name: 
&str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/aliases/{aliasName}", - local_var_configuration.base_path, - aliasName = crate::apis::urlencode(alias_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn delete_alias(configuration: &configuration::Configuration, alias_name: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_alias_name = alias_name; + + let uri_str = format!("{}/aliases/{aliasName}", configuration.base_path, aliasName=crate::apis::urlencode(p_alias_name)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::CollectionAlias`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::CollectionAlias`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Permanently 
drops a collection. This action cannot be undone. For large collections, this might have an impact on read latencies. -pub async fn delete_collection( - configuration: &configuration::Configuration, - collection_name: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn delete_collection(configuration: &configuration::Configuration, collection_name: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + + let uri_str = format!("{}/collections/{collectionName}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::CollectionResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::CollectionResponse`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: 
local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Find out which collection an alias points to by fetching it -pub async fn get_alias( - configuration: &configuration::Configuration, - alias_name: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/aliases/{aliasName}", - local_var_configuration.base_path, - aliasName = crate::apis::urlencode(alias_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn get_alias(configuration: &configuration::Configuration, alias_name: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_alias_name = alias_name; + + let uri_str = format!("{}/aliases/{aliasName}", configuration.base_path, aliasName=crate::apis::urlencode(p_alias_name)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::CollectionAlias`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::CollectionAlias`")))), + } } else { - let local_var_entity: Option = 
serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// List all aliases and the corresponding collections that they map to. -pub async fn get_aliases( - configuration: &configuration::Configuration, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; +pub async fn get_aliases(configuration: &configuration::Configuration, ) -> Result> { - let local_var_uri_str = format!("{}/aliases", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + let uri_str = format!("{}/aliases", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::CollectionAliasesResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::CollectionAliasesResponse`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - 
content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Retrieve the details of a collection, given its name. -pub async fn get_collection( - configuration: &configuration::Configuration, - collection_name: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn get_collection(configuration: &configuration::Configuration, collection_name: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + + let uri_str = format!("{}/collections/{collectionName}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::CollectionResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to 
`models::CollectionResponse`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Returns a summary of all your collections. The collections are returned sorted by creation date, with the most recent collections appearing first. -pub async fn get_collections( - configuration: &configuration::Configuration, -) -> Result, Error> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; +pub async fn get_collections(configuration: &configuration::Configuration, ) -> Result, Error> { - let local_var_uri_str = format!("{}/collections", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + let uri_str = format!("{}/collections", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `Vec<models::CollectionResponse>`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to 
`Vec<models::CollectionResponse>`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Update a collection's schema to modify the fields and their types. -pub async fn update_collection( - configuration: &configuration::Configuration, - collection_name: &str, - collection_update_schema: crate::models::CollectionUpdateSchema, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::PATCH, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn update_collection(configuration: &configuration::Configuration, collection_name: &str, collection_update_schema: models::CollectionUpdateSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_collection_update_schema = collection_update_schema; + + let uri_str = format!("{}/collections/{collectionName}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let mut req_builder = configuration.client.request(reqwest::Method::PATCH, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - local_var_req_builder = local_var_req_builder.json(&collection_update_schema); - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + req_builder = req_builder.json(&p_collection_update_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); 
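// The regenerated functions above now all branch on the response `Content-Type`
// header before deserializing, via the `ContentType` helper imported from
// `apis/mod.rs`. That helper is not part of this hunk, so the following is only
// a minimal, self-contained sketch of the assumed mapping; the exact matching
// rules in the generated module may differ in detail.
#[derive(Debug, PartialEq)]
enum ContentType {
    Json,
    Text,
    Unsupported(String),
}

impl From<&str> for ContentType {
    fn from(content_type: &str) -> Self {
        // Assumed rules: any `application/json` variant is JSON, `text/plain`
        // is text, and everything else is surfaced as unsupported so the caller
        // gets a descriptive error instead of an opaque serde failure.
        if content_type.starts_with("application/json") {
            ContentType::Json
        } else if content_type.starts_with("text/plain") {
            ContentType::Text
        } else {
            ContentType::Unsupported(content_type.to_owned())
        }
    }
}

fn main() {
    assert_eq!(ContentType::from("application/json; charset=utf-8"), ContentType::Json);
    assert_eq!(ContentType::from("text/plain"), ContentType::Text);
    assert!(matches!(
        ContentType::from("application/octet-stream"),
        ContentType::Unsupported(_)
    ));
    println!("content-type mapping behaves as sketched");
}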
+ + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::CollectionUpdateSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::CollectionUpdateSchema`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Create or update a collection alias. An alias is a virtual collection name that points to a real collection. If you're familiar with symbolic links on Linux, it's very similar to that. Aliases are useful when you want to reindex your data in the background on a new collection and switch your application to it without any changes to your code. -pub async fn upsert_alias( - configuration: &configuration::Configuration, - alias_name: &str, - collection_alias_schema: Option, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/aliases/{aliasName}", - local_var_configuration.base_path, - aliasName = crate::apis::urlencode(alias_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::PUT, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn upsert_alias(configuration: &configuration::Configuration, alias_name: &str, collection_alias_schema: Option) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_alias_name = alias_name; + let p_collection_alias_schema = collection_alias_schema; + + let uri_str = format!("{}/aliases/{aliasName}", configuration.base_path, aliasName=crate::apis::urlencode(p_alias_name)); + let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - local_var_req_builder = local_var_req_builder.json(&collection_alias_schema); - - let local_var_req = local_var_req_builder.build()?; - let 
local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + req_builder = req_builder.json(&p_collection_alias_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::CollectionAlias`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::CollectionAlias`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } + diff --git a/typesense_codegen/src/apis/configuration.rs b/typesense_codegen/src/apis/configuration.rs index f5e29b4..1a85d14 100644 --- a/typesense_codegen/src/apis/configuration.rs +++ b/typesense_codegen/src/apis/configuration.rs @@ -3,11 +3,13 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ + + #[derive(Debug, Clone)] pub struct Configuration { pub base_path: String, @@ -17,7 +19,6 @@ pub struct Configuration { pub oauth_access_token: Option, pub bearer_access_token: Option, pub api_key: Option, - // TODO: take an oauth2 token source, similar to the go one } pub type BasicAuth = (String, Option); @@ -28,6 +29,7 @@ pub struct ApiKey { pub key: String, } + impl Configuration { pub fn new() -> Configuration { Configuration::default() @@ -38,7 +40,7 @@ impl Default for Configuration { fn default() -> Self { Configuration { base_path: "http://localhost".to_owned(), - user_agent: Some("OpenAPI-Generator/0.25.0/rust".to_owned()), + user_agent: Some("OpenAPI-Generator/28.0/rust".to_owned()), client: reqwest::Client::new(), basic_auth: None, oauth_access_token: None, diff --git a/typesense_codegen/src/apis/conversations_api.rs b/typesense_codegen/src/apis/conversations_api.rs new file mode 100644 index 0000000..b9f8c0a --- /dev/null +++ b/typesense_codegen/src/apis/conversations_api.rs @@ -0,0 +1,280 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + + +/// struct for typed errors of method [`create_conversation_model`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum CreateConversationModelError { + Status400(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`delete_conversation_model`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum DeleteConversationModelError { + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_all_conversation_models`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrieveAllConversationModelsError { + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_conversation_model`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrieveConversationModelError { + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`update_conversation_model`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum UpdateConversationModelError { + UnknownValue(serde_json::Value), +} + + +/// Create a Conversation Model +pub async fn create_conversation_model(configuration: &configuration::Configuration, conversation_model_create_schema: models::ConversationModelCreateSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_conversation_model_create_schema = conversation_model_create_schema; + + let uri_str = format!("{}/conversations/models", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_conversation_model_create_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ConversationModelSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ConversationModelSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// 
Delete a conversation model +pub async fn delete_conversation_model(configuration: &configuration::Configuration, model_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_model_id = model_id; + + let uri_str = format!("{}/conversations/models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(p_model_id)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ConversationModelSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ConversationModelSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve all conversation models +pub async fn retrieve_all_conversation_models(configuration: &configuration::Configuration, ) -> Result, Error> { + + let uri_str = format!("{}/conversations/models", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `Vec<models::ConversationModelSchema>`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received 
`{unknown_type}` content type response that cannot be converted to `Vec<models::ConversationModelSchema>`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve a conversation model +pub async fn retrieve_conversation_model(configuration: &configuration::Configuration, model_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_model_id = model_id; + + let uri_str = format!("{}/conversations/models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(p_model_id)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ConversationModelSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ConversationModelSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Update a conversation model +pub async fn update_conversation_model(configuration: &configuration::Configuration, model_id: &str, conversation_model_update_schema: models::ConversationModelUpdateSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_model_id = model_id; + let p_conversation_model_update_schema = conversation_model_update_schema; + + let uri_str = format!("{}/conversations/models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(p_model_id)); + let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_conversation_model_update_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = 
resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ConversationModelSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ConversationModelSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + diff --git a/typesense_codegen/src/apis/curation_api.rs b/typesense_codegen/src/apis/curation_api.rs new file mode 100644 index 0000000..dbcce80 --- /dev/null +++ b/typesense_codegen/src/apis/curation_api.rs @@ -0,0 +1,178 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + + +/// struct for typed errors of method [`delete_search_override`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum DeleteSearchOverrideError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`get_search_overrides`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetSearchOverridesError { + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`upsert_search_override`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum UpsertSearchOverrideError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + + +pub async fn delete_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_override_id = override_id; + + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + 
let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchOverrideDeleteResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverrideDeleteResponse`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +pub async fn get_search_overrides(configuration: &configuration::Configuration, collection_name: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + + let uri_str = format!("{}/collections/{collectionName}/overrides", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchOverridesResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverridesResponse`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. 
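// A caller-side sketch of the new `curation_api` module. Everything concrete
// here is illustrative: the collection name "products", the override id
// "promo-shoes", the server address, and the tokio runtime are assumptions;
// only the function signatures and error enums come from the hunk above.
use typesense_codegen::apis::{self, configuration, curation_api};

#[tokio::main]
async fn main() {
    let config = configuration::Configuration {
        base_path: "http://localhost:8108".to_owned(),
        api_key: Some(configuration::ApiKey {
            prefix: None,
            key: "xyz".to_owned(),
        }),
        ..configuration::Configuration::default()
    };

    // List the overrides currently defined on a collection.
    match curation_api::get_search_overrides(&config, "products").await {
        Ok(overrides) => println!("current overrides: {overrides:?}"),
        Err(e) => eprintln!("listing failed: {e}"),
    }

    // Delete one override; the typed error enum lets callers react to a 404
    // specifically instead of string-matching the response body.
    match curation_api::delete_search_override(&config, "products", "promo-shoes").await {
        Ok(deleted) => println!("deleted override: {deleted:?}"),
        Err(apis::Error::ResponseError(resp)) => match resp.entity {
            Some(curation_api::DeleteSearchOverrideError::Status404(api_error)) => {
                eprintln!("override not found: {api_error:?}")
            }
            _ => eprintln!("unexpected response ({}): {}", resp.status, resp.content),
        },
        Err(e) => eprintln!("request failed: {e}"),
    }
}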
+pub async fn upsert_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str, search_override_schema: models::SearchOverrideSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_override_id = override_id; + let p_search_override_schema = search_override_schema; + + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_search_override_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchOverride`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverride`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + diff --git a/typesense_codegen/src/apis/debug_api.rs b/typesense_codegen/src/apis/debug_api.rs index 60a727a..3e53f97 100644 --- a/typesense_codegen/src/apis/debug_api.rs +++ b/typesense_codegen/src/apis/debug_api.rs @@ -3,13 +3,17 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ -use super::{configuration, Error}; -use crate::apis::ResponseContent; + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + /// struct for typed errors of method [`debug`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -18,47 +22,47 @@ pub enum DebugError { UnknownValue(serde_json::Value), } -/// Print debugging information -pub async fn debug( - configuration: &configuration::Configuration, -) -> Result> { - let local_var_configuration = configuration; - let local_var_client = &local_var_configuration.client; +/// Print debugging information +pub async fn debug(configuration: &configuration::Configuration, ) -> Result> { - let local_var_uri_str = format!("{}/debug", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + let uri_str = format!("{}/debug", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::Debug200Response`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::Debug200Response`")))), + } } else { - let local_var_entity: Option = 
serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } + diff --git a/typesense_codegen/src/apis/documents_api.rs b/typesense_codegen/src/apis/documents_api.rs index b0ca978..c228ba1 100644 --- a/typesense_codegen/src/apis/documents_api.rs +++ b/typesense_codegen/src/apis/documents_api.rs @@ -3,19 +3,23 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ -use super::{configuration, Error}; -use crate::apis::ResponseContent; + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + /// struct for typed errors of method [`delete_document`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum DeleteDocumentError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -23,7 +27,7 @@ pub enum DeleteDocumentError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum DeleteDocumentsError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -31,15 +35,7 @@ pub enum DeleteDocumentsError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum DeleteSearchOverrideError { - Status404(crate::models::ApiResponse), - UnknownValue(serde_json::Value), -} - -/// struct for typed errors of method [`delete_search_synonym`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum DeleteSearchSynonymError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -47,7 +43,7 @@ pub enum DeleteSearchSynonymError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum ExportDocumentsError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -55,7 +51,7 @@ pub enum ExportDocumentsError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum GetDocumentError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -73,28 +69,12 @@ pub enum GetSearchOverridesError { UnknownValue(serde_json::Value), } -/// struct for typed errors of method [`get_search_synonym`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum GetSearchSynonymError { - Status404(crate::models::ApiResponse), - UnknownValue(serde_json::Value), -} - -/// struct for typed errors of method [`get_search_synonyms`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum GetSearchSynonymsError { - Status404(crate::models::ApiResponse), - UnknownValue(serde_json::Value), -} - /// struct for typed errors of method [`import_documents`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum ImportDocumentsError { - Status400(crate::models::ApiResponse), - Status404(crate::models::ApiResponse), + Status400(models::ApiResponse), + 
Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -102,7 +82,7 @@ pub enum ImportDocumentsError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum IndexDocumentError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -110,7 +90,7 @@ pub enum IndexDocumentError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum MultiSearchError { - Status400(crate::models::ApiResponse), + Status400(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -118,8 +98,8 @@ pub enum MultiSearchError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum SearchCollectionError { - Status400(crate::models::ApiResponse), - Status404(crate::models::ApiResponse), + Status400(models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -127,7 +107,7 @@ pub enum SearchCollectionError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum UpdateDocumentError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -135,8 +115,8 @@ pub enum UpdateDocumentError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum UpdateDocumentsError { - Status400(crate::models::ApiResponse), - Status404(crate::models::ApiResponse), + Status400(models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -144,990 +124,1257 @@ pub enum UpdateDocumentsError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum UpsertSearchOverrideError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } -/// struct for typed errors of method [`upsert_search_synonym`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum UpsertSearchSynonymError { - Status404(crate::models::ApiResponse), - UnknownValue(serde_json::Value), -} /// Delete an individual document from a collection by using its ID. 
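// A caller-side sketch of the reworked documents API. The struct-based
// `*ParametersParameter` arguments are gone; filters and options are now plain
// function parameters. The collection name, document id, filter expression,
// tokio runtime, and the `i32`/`bool` element types of the optional parameters
// are assumptions for illustration, not copied from the hunk below.
use typesense_codegen::apis::{configuration, documents_api};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = configuration::Configuration {
        base_path: "http://localhost:8108".to_owned(),
        api_key: Some(configuration::ApiKey {
            prefix: None,
            key: "xyz".to_owned(),
        }),
        ..configuration::Configuration::default()
    };

    // Delete one document by id; the success type is a plain `serde_json::Value`,
    // matching the generated return type referenced in the error messages below.
    let deleted = documents_api::delete_document(&config, "products", "124").await?;
    println!("deleted document: {deleted}");

    // Bulk-delete by filter using the flattened parameters
    // (batch_size, filter_by, ignore_not_found, truncate).
    let result = documents_api::delete_documents(
        &config,
        "products",
        Some(100),         // batch_size (type assumed to be i32)
        Some("rating:<2"), // filter_by
        Some(true),        // ignore_not_found (type assumed to be bool)
        None,              // truncate (type assumed to be bool)
    )
    .await?;
    println!("bulk delete response: {result:?}");

    Ok(())
}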
-pub async fn delete_document( - configuration: &configuration::Configuration, - collection_name: &str, - document_id: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/documents/{documentId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - documentId = crate::apis::urlencode(document_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; +pub async fn delete_document(configuration: &configuration::Configuration, collection_name: &str, document_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_document_id = document_id; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), documentId=crate::apis::urlencode(p_document_id)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `serde_json::Value`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `serde_json::Value`")))), + } } else { - let local_var_entity: Option = - 
serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Delete a bunch of documents that match a specific filter condition. Use the `batch_size` parameter to control the number of documents that should deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. -pub async fn delete_documents( - configuration: &configuration::Configuration, - collection_name: &str, - delete_documents_parameters: Option< - crate::models::DeleteDocumentsDeleteDocumentsParametersParameter, - >, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/documents", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); - - if let Some(ref delete_documents_parameters_ref) = delete_documents_parameters { - local_var_req_builder = local_var_req_builder.query(&delete_documents_parameters_ref); - } - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), +pub async fn delete_documents(configuration: &configuration::Configuration, collection_name: &str, batch_size: Option, filter_by: Option<&str>, ignore_not_found: Option, truncate: Option) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_batch_size = batch_size; + let p_filter_by = filter_by; + let p_ignore_not_found = ignore_not_found; + let p_truncate = truncate; + + let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref param_value) = p_batch_size { + req_builder = req_builder.query(&[("batch_size", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_filter_by { + req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_ignore_not_found { + req_builder = req_builder.query(&[("ignore_not_found", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_truncate { + req_builder = req_builder.query(&[("truncate", ¶m_value.to_string())]); + } + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - 
local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::DeleteDocuments200Response`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::DeleteDocuments200Response`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } -pub async fn delete_search_override( - configuration: &configuration::Configuration, - collection_name: &str, - override_id: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/overrides/{overrideId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - overrideId = crate::apis::urlencode(override_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; +pub async fn delete_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_override_id = override_id; - let 
local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } -} - -pub async fn delete_search_synonym( - configuration: &configuration::Configuration, - collection_name: &str, - synonym_id: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/synonyms/{synonymId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - synonymId = crate::apis::urlencode(synonym_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchOverrideDeleteResponse`"))), + 
ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverrideDeleteResponse`")))),
+        }
     } else {
-        let local_var_entity: Option<DeleteSearchOverrideError> =
-            serde_json::from_str(&local_var_content).ok();
-        let local_var_error = ResponseContent {
-            status: local_var_status,
-            content: local_var_content,
-            entity: local_var_entity,
-        };
-        Err(Error::ResponseError(local_var_error))
+        let content = resp.text().await?;
+        let entity: Option<DeleteSearchOverrideError> = serde_json::from_str(&content).ok();
+        Err(Error::ResponseError(ResponseContent { status, content, entity }))
     }
 }
 
 /// Export all documents in a collection in JSON lines format.
-pub async fn export_documents(
-    configuration: &configuration::Configuration,
-    collection_name: &str,
-    export_documents_parameters: Option<
-        crate::models::ExportDocumentsExportDocumentsParametersParameter,
-    >,
-) -> Result<String, Error<ExportDocumentsError>> {
-    let local_var_configuration = configuration;
-
-    let local_var_client = &local_var_configuration.client;
-
-    let local_var_uri_str = format!(
-        "{}/collections/{collectionName}/documents/export",
-        local_var_configuration.base_path,
-        collectionName = crate::apis::urlencode(collection_name)
-    );
-    let mut local_var_req_builder =
-        local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str());
-
-    if let Some(ref export_documents_parameters_ref) = export_documents_parameters {
-        local_var_req_builder = local_var_req_builder.query(&export_documents_parameters_ref);
-    }
-    if let Some(ref local_var_user_agent) = local_var_configuration.user_agent {
-        local_var_req_builder =
-            local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent);
-    }
-    if let Some(ref local_var_apikey) = local_var_configuration.api_key {
-        let local_var_key = &local_var_apikey.key;
-        let local_var_value = match local_var_apikey.prefix {
-            Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key),
-            None => local_var_key.clone(),
+pub async fn export_documents(configuration: &configuration::Configuration, collection_name: &str, exclude_fields: Option<&str>, filter_by: Option<&str>, include_fields: Option<&str>) -> Result<String, Error<ExportDocumentsError>> {
+    // add a prefix to parameters to efficiently prevent name collisions
+    let p_collection_name = collection_name;
+    let p_exclude_fields = exclude_fields;
+    let p_filter_by = filter_by;
+    let p_include_fields = include_fields;
+
+    let uri_str = format!("{}/collections/{collectionName}/documents/export", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name));
+    let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str);
+
+    if let Some(ref param_value) = p_exclude_fields {
+        req_builder = req_builder.query(&[("exclude_fields", &param_value.to_string())]);
+    }
+    if let Some(ref param_value) = p_filter_by {
+        req_builder = req_builder.query(&[("filter_by", &param_value.to_string())]);
+    }
+    if let Some(ref param_value) = p_include_fields {
+        req_builder = req_builder.query(&[("include_fields", &param_value.to_string())]);
+    }
+    if let Some(ref user_agent) = configuration.user_agent {
+        req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone());
+    }
+    if let Some(ref apikey) = configuration.api_key {
+        let key = apikey.key.clone();
+        let value = match apikey.prefix {
+            Some(ref prefix) => format!("{} {}", prefix, key),
+            None => key,
         };
-        local_var_req_builder =
-            local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value);
+        req_builder = req_builder.header("X-TYPESENSE-API-KEY", value);
     };
 
-    let local_var_req = local_var_req_builder.build()?;
-    let local_var_resp = local_var_client.execute(local_var_req).await?;
-
-    let local_var_status = local_var_resp.status();
-    let local_var_content = local_var_resp.text().await?;
-
-    if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
-        serde_json::from_str(&local_var_content).map_err(Error::from)
+    let req = req_builder.build()?;
+    let resp = configuration.client.execute(req).await?;
+
+    let status = resp.status();
+    let content_type = resp
+        .headers()
+        .get("content-type")
+        .and_then(|v| v.to_str().ok())
+        .unwrap_or("application/octet-stream");
+    let content_type = super::ContentType::from(content_type);
+
+    if !status.is_client_error() && !status.is_server_error() {
+        let content = resp.text().await?;
+        match content_type {
+            ContentType::Json => serde_json::from_str(&content).map_err(Error::from),
+            ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `String`"))),
+            ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `String`")))),
+        }
     } else {
-        let local_var_entity: Option<ExportDocumentsError> =
-            serde_json::from_str(&local_var_content).ok();
-        let local_var_error = ResponseContent {
-            status: local_var_status,
-            content: local_var_content,
-            entity: local_var_entity,
-        };
-        Err(Error::ResponseError(local_var_error))
+        let content = resp.text().await?;
+        let entity: Option<ExportDocumentsError> = serde_json::from_str(&content).ok();
+        Err(Error::ResponseError(ResponseContent { status, content, entity }))
     }
 }
 
 /// Fetch an individual document from a collection by using its ID.
-pub async fn get_document( - configuration: &configuration::Configuration, - collection_name: &str, - document_id: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/documents/{documentId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - documentId = crate::apis::urlencode(document_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; +pub async fn get_document(configuration: &configuration::Configuration, collection_name: &str, document_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_document_id = document_id; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), documentId=crate::apis::urlencode(p_document_id)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `serde_json::Value`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `serde_json::Value`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - 
let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Retrieve the details of a search override, given its id. -pub async fn get_search_override( - configuration: &configuration::Configuration, - collection_name: &str, - override_id: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/overrides/{overrideId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - overrideId = crate::apis::urlencode(override_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; +pub async fn get_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_override_id = override_id; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } -} - -pub async fn get_search_overrides( - configuration: &configuration::Configuration, - collection_name: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/overrides", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref 
local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchOverride`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverride`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } -/// Retrieve the details of a search synonym, given its id. 
-pub async fn get_search_synonym( - configuration: &configuration::Configuration, - collection_name: &str, - synonym_id: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/synonyms/{synonymId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - synonymId = crate::apis::urlencode(synonym_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; +pub async fn get_search_overrides(configuration: &configuration::Configuration, collection_name: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + let uri_str = format!("{}/collections/{collectionName}/overrides", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } -} - -pub async fn get_search_synonyms( - configuration: &configuration::Configuration, - collection_name: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/synonyms", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = 
apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchOverridesResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverridesResponse`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// The documents to be imported must be formatted in a newline delimited JSON structure. You can feed the output file from a Typesense export operation directly as import. 
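// Caller-side sketch (editorial illustration, not generator output) of the regenerated
// `import_documents` signature below: the body is newline-delimited JSON, and the six
// trailing `None`s leave action, batch_size, dirty_values, remote_embedding_batch_size,
// return_doc and return_id at their server-side defaults. The `books` collection and
// the two documents are hypothetical; kept in comment form so the patch body stays code.
//
// async fn import_two(config: &configuration::Configuration) {
//     let jsonl = "{\"id\":\"1\",\"title\":\"first\"}\n{\"id\":\"2\",\"title\":\"second\"}";
//     match import_documents(config, "books", jsonl, None, None, None, None, None, None).await {
//         Ok(report) => println!("per-document import results:\n{report}"),
//         Err(e) => eprintln!("import failed: {e}"),
//     }
// }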
-pub async fn import_documents(
-    configuration: &configuration::Configuration,
-    collection_name: &str,
-    body: String,
-    import_documents_parameters: Option<
-        crate::models::ImportDocumentsImportDocumentsParametersParameter,
-    >,
-) -> Result<String, Error<ImportDocumentsError>> {
-    let local_var_configuration = configuration;
-
-    let local_var_client = &local_var_configuration.client;
-
-    let local_var_uri_str = format!(
-        "{}/collections/{collectionName}/documents/import",
-        local_var_configuration.base_path,
-        collectionName = crate::apis::urlencode(collection_name)
-    );
-    let mut local_var_req_builder =
-        local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str());
-
-    if let Some(ref import_documents_parameters_ref) = import_documents_parameters {
-        local_var_req_builder = local_var_req_builder.query(&import_documents_parameters_ref);
-    }
-    if let Some(ref local_var_user_agent) = local_var_configuration.user_agent {
-        local_var_req_builder =
-            local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent);
-    }
-    if let Some(ref local_var_apikey) = local_var_configuration.api_key {
-        let local_var_key = &local_var_apikey.key;
-        let local_var_value = match local_var_apikey.prefix {
-            Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key),
-            None => local_var_key.clone(),
+pub async fn import_documents(configuration: &configuration::Configuration, collection_name: &str, body: &str, action: Option<models::IndexAction>, batch_size: Option<i32>, dirty_values: Option<models::DirtyValues>, remote_embedding_batch_size: Option<i32>, return_doc: Option<bool>, return_id: Option<bool>) -> Result<String, Error<ImportDocumentsError>> {
+    // add a prefix to parameters to efficiently prevent name collisions
+    let p_collection_name = collection_name;
+    let p_body = body;
+    let p_action = action;
+    let p_batch_size = batch_size;
+    let p_dirty_values = dirty_values;
+    let p_remote_embedding_batch_size = remote_embedding_batch_size;
+    let p_return_doc = return_doc;
+    let p_return_id = return_id;
+
+    let uri_str = format!("{}/collections/{collectionName}/documents/import", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name));
+    let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str);
+
+    if let Some(ref param_value) = p_action {
+        req_builder = req_builder.query(&[("action", &param_value.to_string())]);
+    }
+    if let Some(ref param_value) = p_batch_size {
+        req_builder = req_builder.query(&[("batch_size", &param_value.to_string())]);
+    }
+    if let Some(ref param_value) = p_dirty_values {
+        req_builder = req_builder.query(&[("dirty_values", &param_value.to_string())]);
+    }
+    if let Some(ref param_value) = p_remote_embedding_batch_size {
+        req_builder = req_builder.query(&[("remote_embedding_batch_size", &param_value.to_string())]);
+    }
+    if let Some(ref param_value) = p_return_doc {
+        req_builder = req_builder.query(&[("return_doc", &param_value.to_string())]);
+    }
+    if let Some(ref param_value) = p_return_id {
+        req_builder = req_builder.query(&[("return_id", &param_value.to_string())]);
+    }
+    if let Some(ref user_agent) = configuration.user_agent {
+        req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone());
+    }
+    if let Some(ref apikey) = configuration.api_key {
+        let key = apikey.key.clone();
+        let value = match apikey.prefix {
+            Some(ref prefix) => format!("{} {}", prefix, key),
+            None => key,
         };
-        local_var_req_builder =
-            local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value);
+        req_builder = req_builder.header("X-TYPESENSE-API-KEY", value);
     };
 
-    // was changed by hand
-    local_var_req_builder = local_var_req_builder.body(body);
-
-    let local_var_req = local_var_req_builder.build()?;
-    let local_var_resp = local_var_client.execute(local_var_req).await?;
-
-    let local_var_status = local_var_resp.status();
-    let local_var_content = local_var_resp.text().await?;
-
-    if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
-        // was changed by hand
-        Ok(local_var_content)
+    req_builder = req_builder.json(&p_body);
+
+    let req = req_builder.build()?;
+    let resp = configuration.client.execute(req).await?;
+
+    let status = resp.status();
+    let content_type = resp
+        .headers()
+        .get("content-type")
+        .and_then(|v| v.to_str().ok())
+        .unwrap_or("application/octet-stream");
+    let content_type = super::ContentType::from(content_type);
+
+    if !status.is_client_error() && !status.is_server_error() {
+        let content = resp.text().await?;
+        match content_type {
+            ContentType::Json => serde_json::from_str(&content).map_err(Error::from),
+            ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `String`"))),
+            ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `String`")))),
+        }
     } else {
-        let local_var_entity: Option<ImportDocumentsError> =
-            serde_json::from_str(&local_var_content).ok();
-        let local_var_error = ResponseContent {
-            status: local_var_status,
-            content: local_var_content,
-            entity: local_var_entity,
-        };
-        Err(Error::ResponseError(local_var_error))
+        let content = resp.text().await?;
+        let entity: Option<ImportDocumentsError> = serde_json::from_str(&content).ok();
+        Err(Error::ResponseError(ResponseContent { status, content, entity }))
     }
 }
 
 /// A document to be indexed in a given collection must conform to the schema of the collection.
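// Caller-side sketch (editorial illustration, not generator output) of the regenerated
// `index_document` below: the document is a plain `serde_json::Value` that must match
// the collection schema; `action` and `dirty_values` are left as `None` for the
// defaults. The `books` collection and the document are hypothetical.
//
// async fn index_one(config: &configuration::Configuration) {
//     let doc = serde_json::json!({ "id": "1", "title": "first" });
//     match index_document(config, "books", doc, None, None).await {
//         Ok(stored) => println!("indexed: {stored}"),
//         Err(e) => eprintln!("indexing failed: {e}"),
//     }
// }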
-pub async fn index_document(
-    configuration: &configuration::Configuration,
-    collection_name: &str,
-    body: serde_json::Value,
-    action: Option<&str>,
-) -> Result<serde_json::Value, Error<IndexDocumentError>> {
-    let local_var_configuration = configuration;
-
-    let local_var_client = &local_var_configuration.client;
-
-    let local_var_uri_str = format!(
-        "{}/collections/{collectionName}/documents",
-        local_var_configuration.base_path,
-        collectionName = crate::apis::urlencode(collection_name)
-    );
-    let mut local_var_req_builder =
-        local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str());
-
-    if let Some(ref local_var_str) = action {
-        local_var_req_builder =
-            local_var_req_builder.query(&[("action", &local_var_str.to_string())]);
-    }
-    if let Some(ref local_var_user_agent) = local_var_configuration.user_agent {
-        local_var_req_builder =
-            local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent);
-    }
-    if let Some(ref local_var_apikey) = local_var_configuration.api_key {
-        let local_var_key = &local_var_apikey.key;
-        let local_var_value = match local_var_apikey.prefix {
-            Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key),
-            None => local_var_key.clone(),
+pub async fn index_document(configuration: &configuration::Configuration, collection_name: &str, body: serde_json::Value, action: Option<&str>, dirty_values: Option<models::DirtyValues>) -> Result<serde_json::Value, Error<IndexDocumentError>> {
+    // add a prefix to parameters to efficiently prevent name collisions
+    let p_collection_name = collection_name;
+    let p_body = body;
+    let p_action = action;
+    let p_dirty_values = dirty_values;
+
+    let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name));
+    let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str);
+
+    if let Some(ref param_value) = p_action {
+        req_builder = req_builder.query(&[("action", &param_value.to_string())]);
+    }
+    if let Some(ref param_value) = p_dirty_values {
+        req_builder = req_builder.query(&[("dirty_values", &param_value.to_string())]);
+    }
+    if let Some(ref user_agent) = configuration.user_agent {
+        req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone());
+    }
+    if let Some(ref apikey) = configuration.api_key {
+        let key = apikey.key.clone();
+        let value = match apikey.prefix {
+            Some(ref prefix) => format!("{} {}", prefix, key),
+            None => key,
         };
-        local_var_req_builder =
-            local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value);
+        req_builder = req_builder.header("X-TYPESENSE-API-KEY", value);
     };
 
-    local_var_req_builder = local_var_req_builder.json(&body);
-
-    let local_var_req = local_var_req_builder.build()?;
-    let local_var_resp = local_var_client.execute(local_var_req).await?;
-
-    let local_var_status = local_var_resp.status();
-    let local_var_content = local_var_resp.text().await?;
-
-    if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
-        serde_json::from_str(&local_var_content).map_err(Error::from)
+    req_builder = req_builder.json(&p_body);
+
+    let req = req_builder.build()?;
+    let resp = configuration.client.execute(req).await?;
+
+    let status = resp.status();
+    let content_type = resp
+        .headers()
+        .get("content-type")
+        .and_then(|v| v.to_str().ok())
+        .unwrap_or("application/octet-stream");
+    let content_type = super::ContentType::from(content_type);
+
+    if !status.is_client_error() && !status.is_server_error() {
+        let content = resp.text().await?;
+        match content_type {
+            ContentType::Json =>
serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `serde_json::Value`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `serde_json::Value`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. -pub async fn multi_search serde::Deserialize<'d>>( - configuration: &configuration::Configuration, - multi_search_parameters: crate::models::MultiSearchParameters, - multi_search_searches_parameter: Option, -) -> Result, Error> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!("{}/multi_search", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); - - local_var_req_builder = local_var_req_builder.query(&multi_search_parameters); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), +pub async fn multi_search(configuration: &configuration::Configuration, cache_ttl: Option, conversation: Option, conversation_id: Option<&str>, conversation_model_id: Option<&str>, drop_tokens_mode: Option, drop_tokens_threshold: Option, enable_highlight_v1: Option, enable_overrides: Option, enable_synonyms: Option, enable_typos_for_alpha_numerical_tokens: Option, enable_typos_for_numerical_tokens: Option, exclude_fields: Option<&str>, exhaustive_search: Option, facet_by: Option<&str>, facet_query: Option<&str>, facet_return_parent: Option<&str>, facet_strategy: Option<&str>, filter_by: Option<&str>, filter_curated_hits: Option, group_by: Option<&str>, group_limit: Option, group_missing_values: Option, hidden_hits: Option<&str>, highlight_affix_num_tokens: Option, highlight_end_tag: Option<&str>, highlight_fields: Option<&str>, highlight_full_fields: Option<&str>, highlight_start_tag: Option<&str>, include_fields: Option<&str>, infix: Option<&str>, limit: Option, max_candidates: Option, max_extra_prefix: Option, max_extra_suffix: Option, max_facet_values: Option, max_filter_by_candidates: Option, min_len_1typo: Option, min_len_2typo: Option, num_typos: Option<&str>, offset: Option, override_tags: Option<&str>, page: Option, per_page: Option, pinned_hits: Option<&str>, pre_segmented_query: 
Option, prefix: Option<&str>, preset: Option<&str>, prioritize_exact_match: Option, prioritize_num_matching_fields: Option, prioritize_token_position: Option, q: Option<&str>, query_by: Option<&str>, query_by_weights: Option<&str>, remote_embedding_num_tries: Option, remote_embedding_timeout_ms: Option, search_cutoff_ms: Option, snippet_threshold: Option, sort_by: Option<&str>, split_join_tokens: Option<&str>, stopwords: Option<&str>, synonym_num_typos: Option, synonym_prefix: Option, text_match_type: Option<&str>, typo_tokens_threshold: Option, use_cache: Option, vector_query: Option<&str>, voice_query: Option<&str>, multi_search_searches_parameter: Option) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_cache_ttl = cache_ttl; + let p_conversation = conversation; + let p_conversation_id = conversation_id; + let p_conversation_model_id = conversation_model_id; + let p_drop_tokens_mode = drop_tokens_mode; + let p_drop_tokens_threshold = drop_tokens_threshold; + let p_enable_highlight_v1 = enable_highlight_v1; + let p_enable_overrides = enable_overrides; + let p_enable_synonyms = enable_synonyms; + let p_enable_typos_for_alpha_numerical_tokens = enable_typos_for_alpha_numerical_tokens; + let p_enable_typos_for_numerical_tokens = enable_typos_for_numerical_tokens; + let p_exclude_fields = exclude_fields; + let p_exhaustive_search = exhaustive_search; + let p_facet_by = facet_by; + let p_facet_query = facet_query; + let p_facet_return_parent = facet_return_parent; + let p_facet_strategy = facet_strategy; + let p_filter_by = filter_by; + let p_filter_curated_hits = filter_curated_hits; + let p_group_by = group_by; + let p_group_limit = group_limit; + let p_group_missing_values = group_missing_values; + let p_hidden_hits = hidden_hits; + let p_highlight_affix_num_tokens = highlight_affix_num_tokens; + let p_highlight_end_tag = highlight_end_tag; + let p_highlight_fields = highlight_fields; + let p_highlight_full_fields = highlight_full_fields; + let p_highlight_start_tag = highlight_start_tag; + let p_include_fields = include_fields; + let p_infix = infix; + let p_limit = limit; + let p_max_candidates = max_candidates; + let p_max_extra_prefix = max_extra_prefix; + let p_max_extra_suffix = max_extra_suffix; + let p_max_facet_values = max_facet_values; + let p_max_filter_by_candidates = max_filter_by_candidates; + let p_min_len_1typo = min_len_1typo; + let p_min_len_2typo = min_len_2typo; + let p_num_typos = num_typos; + let p_offset = offset; + let p_override_tags = override_tags; + let p_page = page; + let p_per_page = per_page; + let p_pinned_hits = pinned_hits; + let p_pre_segmented_query = pre_segmented_query; + let p_prefix = prefix; + let p_preset = preset; + let p_prioritize_exact_match = prioritize_exact_match; + let p_prioritize_num_matching_fields = prioritize_num_matching_fields; + let p_prioritize_token_position = prioritize_token_position; + let p_q = q; + let p_query_by = query_by; + let p_query_by_weights = query_by_weights; + let p_remote_embedding_num_tries = remote_embedding_num_tries; + let p_remote_embedding_timeout_ms = remote_embedding_timeout_ms; + let p_search_cutoff_ms = search_cutoff_ms; + let p_snippet_threshold = snippet_threshold; + let p_sort_by = sort_by; + let p_split_join_tokens = split_join_tokens; + let p_stopwords = stopwords; + let p_synonym_num_typos = synonym_num_typos; + let p_synonym_prefix = synonym_prefix; + let p_text_match_type = text_match_type; + let p_typo_tokens_threshold = typo_tokens_threshold; + let 
p_use_cache = use_cache; + let p_vector_query = vector_query; + let p_voice_query = voice_query; + let p_multi_search_searches_parameter = multi_search_searches_parameter; + + let uri_str = format!("{}/multi_search", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); + + if let Some(ref param_value) = p_cache_ttl { + req_builder = req_builder.query(&[("cache_ttl", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_conversation { + req_builder = req_builder.query(&[("conversation", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_conversation_id { + req_builder = req_builder.query(&[("conversation_id", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_conversation_model_id { + req_builder = req_builder.query(&[("conversation_model_id", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_drop_tokens_mode { + req_builder = req_builder.query(&[("drop_tokens_mode", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_drop_tokens_threshold { + req_builder = req_builder.query(&[("drop_tokens_threshold", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_enable_highlight_v1 { + req_builder = req_builder.query(&[("enable_highlight_v1", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_enable_overrides { + req_builder = req_builder.query(&[("enable_overrides", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_enable_synonyms { + req_builder = req_builder.query(&[("enable_synonyms", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_enable_typos_for_alpha_numerical_tokens { + req_builder = req_builder.query(&[("enable_typos_for_alpha_numerical_tokens", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_enable_typos_for_numerical_tokens { + req_builder = req_builder.query(&[("enable_typos_for_numerical_tokens", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_exclude_fields { + req_builder = req_builder.query(&[("exclude_fields", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_exhaustive_search { + req_builder = req_builder.query(&[("exhaustive_search", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_facet_by { + req_builder = req_builder.query(&[("facet_by", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_facet_query { + req_builder = req_builder.query(&[("facet_query", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_facet_return_parent { + req_builder = req_builder.query(&[("facet_return_parent", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_facet_strategy { + req_builder = req_builder.query(&[("facet_strategy", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_filter_by { + req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_filter_curated_hits { + req_builder = req_builder.query(&[("filter_curated_hits", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_group_by { + req_builder = req_builder.query(&[("group_by", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_group_limit { + req_builder = req_builder.query(&[("group_limit", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_group_missing_values { + req_builder = req_builder.query(&[("group_missing_values", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_hidden_hits { + req_builder = req_builder.query(&[("hidden_hits", 
¶m_value.to_string())]); + } + if let Some(ref param_value) = p_highlight_affix_num_tokens { + req_builder = req_builder.query(&[("highlight_affix_num_tokens", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_highlight_end_tag { + req_builder = req_builder.query(&[("highlight_end_tag", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_highlight_fields { + req_builder = req_builder.query(&[("highlight_fields", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_highlight_full_fields { + req_builder = req_builder.query(&[("highlight_full_fields", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_highlight_start_tag { + req_builder = req_builder.query(&[("highlight_start_tag", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_include_fields { + req_builder = req_builder.query(&[("include_fields", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_infix { + req_builder = req_builder.query(&[("infix", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_limit { + req_builder = req_builder.query(&[("limit", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_max_candidates { + req_builder = req_builder.query(&[("max_candidates", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_max_extra_prefix { + req_builder = req_builder.query(&[("max_extra_prefix", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_max_extra_suffix { + req_builder = req_builder.query(&[("max_extra_suffix", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_max_facet_values { + req_builder = req_builder.query(&[("max_facet_values", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_max_filter_by_candidates { + req_builder = req_builder.query(&[("max_filter_by_candidates", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_min_len_1typo { + req_builder = req_builder.query(&[("min_len_1typo", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_min_len_2typo { + req_builder = req_builder.query(&[("min_len_2typo", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_num_typos { + req_builder = req_builder.query(&[("num_typos", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_offset { + req_builder = req_builder.query(&[("offset", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_override_tags { + req_builder = req_builder.query(&[("override_tags", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_page { + req_builder = req_builder.query(&[("page", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_per_page { + req_builder = req_builder.query(&[("per_page", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_pinned_hits { + req_builder = req_builder.query(&[("pinned_hits", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_pre_segmented_query { + req_builder = req_builder.query(&[("pre_segmented_query", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_prefix { + req_builder = req_builder.query(&[("prefix", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_preset { + req_builder = req_builder.query(&[("preset", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_prioritize_exact_match { + req_builder = req_builder.query(&[("prioritize_exact_match", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_prioritize_num_matching_fields { + req_builder = req_builder.query(&[("prioritize_num_matching_fields", 
¶m_value.to_string())]); + } + if let Some(ref param_value) = p_prioritize_token_position { + req_builder = req_builder.query(&[("prioritize_token_position", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_q { + req_builder = req_builder.query(&[("q", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_query_by { + req_builder = req_builder.query(&[("query_by", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_query_by_weights { + req_builder = req_builder.query(&[("query_by_weights", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_remote_embedding_num_tries { + req_builder = req_builder.query(&[("remote_embedding_num_tries", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_remote_embedding_timeout_ms { + req_builder = req_builder.query(&[("remote_embedding_timeout_ms", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_search_cutoff_ms { + req_builder = req_builder.query(&[("search_cutoff_ms", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_snippet_threshold { + req_builder = req_builder.query(&[("snippet_threshold", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_sort_by { + req_builder = req_builder.query(&[("sort_by", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_split_join_tokens { + req_builder = req_builder.query(&[("split_join_tokens", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_stopwords { + req_builder = req_builder.query(&[("stopwords", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_synonym_num_typos { + req_builder = req_builder.query(&[("synonym_num_typos", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_synonym_prefix { + req_builder = req_builder.query(&[("synonym_prefix", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_text_match_type { + req_builder = req_builder.query(&[("text_match_type", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_typo_tokens_threshold { + req_builder = req_builder.query(&[("typo_tokens_threshold", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_use_cache { + req_builder = req_builder.query(&[("use_cache", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_vector_query { + req_builder = req_builder.query(&[("vector_query", ¶m_value.to_string())]); + } + if let Some(ref param_value) = p_voice_query { + req_builder = req_builder.query(&[("voice_query", ¶m_value.to_string())]); + } + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - local_var_req_builder = local_var_req_builder.json(&multi_search_searches_parameter); - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + req_builder = 
req_builder.json(&p_multi_search_searches_parameter); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::MultiSearchResult`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::MultiSearchResult`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Search for documents in a collection that match the search criteria. -pub async fn search_collection serde::Deserialize<'d>>( - configuration: &configuration::Configuration, - collection_name: &str, - search_parameters: crate::models::SearchParameters, -) -> Result, Error> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/documents/search", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - local_var_req_builder = local_var_req_builder.query(&search_parameters); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), +pub async fn search_collection(configuration: &configuration::Configuration, collection_name: &str, cache_ttl: Option, conversation: Option, conversation_id: Option<&str>, conversation_model_id: Option<&str>, drop_tokens_mode: Option, drop_tokens_threshold: Option, enable_highlight_v1: Option, enable_overrides: Option, enable_synonyms: Option, enable_typos_for_alpha_numerical_tokens: Option, enable_typos_for_numerical_tokens: Option, exclude_fields: Option<&str>, exhaustive_search: Option, facet_by: Option<&str>, facet_query: Option<&str>, facet_return_parent: Option<&str>, facet_strategy: Option<&str>, filter_by: Option<&str>, filter_curated_hits: Option, group_by: Option<&str>, group_limit: Option, group_missing_values: Option, hidden_hits: Option<&str>, highlight_affix_num_tokens: Option, highlight_end_tag: Option<&str>, highlight_fields: Option<&str>, highlight_full_fields: 
Option<&str>, highlight_start_tag: Option<&str>, include_fields: Option<&str>, infix: Option<&str>, limit: Option, max_candidates: Option, max_extra_prefix: Option, max_extra_suffix: Option, max_facet_values: Option, max_filter_by_candidates: Option, min_len_1typo: Option, min_len_2typo: Option, num_typos: Option<&str>, offset: Option, override_tags: Option<&str>, page: Option, per_page: Option, pinned_hits: Option<&str>, pre_segmented_query: Option, prefix: Option<&str>, preset: Option<&str>, prioritize_exact_match: Option, prioritize_num_matching_fields: Option, prioritize_token_position: Option, q: Option<&str>, query_by: Option<&str>, query_by_weights: Option<&str>, remote_embedding_num_tries: Option, remote_embedding_timeout_ms: Option, search_cutoff_ms: Option, snippet_threshold: Option, sort_by: Option<&str>, split_join_tokens: Option<&str>, stopwords: Option<&str>, synonym_num_typos: Option, synonym_prefix: Option, text_match_type: Option<&str>, typo_tokens_threshold: Option, use_cache: Option, vector_query: Option<&str>, voice_query: Option<&str>) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_cache_ttl = cache_ttl; + let p_conversation = conversation; + let p_conversation_id = conversation_id; + let p_conversation_model_id = conversation_model_id; + let p_drop_tokens_mode = drop_tokens_mode; + let p_drop_tokens_threshold = drop_tokens_threshold; + let p_enable_highlight_v1 = enable_highlight_v1; + let p_enable_overrides = enable_overrides; + let p_enable_synonyms = enable_synonyms; + let p_enable_typos_for_alpha_numerical_tokens = enable_typos_for_alpha_numerical_tokens; + let p_enable_typos_for_numerical_tokens = enable_typos_for_numerical_tokens; + let p_exclude_fields = exclude_fields; + let p_exhaustive_search = exhaustive_search; + let p_facet_by = facet_by; + let p_facet_query = facet_query; + let p_facet_return_parent = facet_return_parent; + let p_facet_strategy = facet_strategy; + let p_filter_by = filter_by; + let p_filter_curated_hits = filter_curated_hits; + let p_group_by = group_by; + let p_group_limit = group_limit; + let p_group_missing_values = group_missing_values; + let p_hidden_hits = hidden_hits; + let p_highlight_affix_num_tokens = highlight_affix_num_tokens; + let p_highlight_end_tag = highlight_end_tag; + let p_highlight_fields = highlight_fields; + let p_highlight_full_fields = highlight_full_fields; + let p_highlight_start_tag = highlight_start_tag; + let p_include_fields = include_fields; + let p_infix = infix; + let p_limit = limit; + let p_max_candidates = max_candidates; + let p_max_extra_prefix = max_extra_prefix; + let p_max_extra_suffix = max_extra_suffix; + let p_max_facet_values = max_facet_values; + let p_max_filter_by_candidates = max_filter_by_candidates; + let p_min_len_1typo = min_len_1typo; + let p_min_len_2typo = min_len_2typo; + let p_num_typos = num_typos; + let p_offset = offset; + let p_override_tags = override_tags; + let p_page = page; + let p_per_page = per_page; + let p_pinned_hits = pinned_hits; + let p_pre_segmented_query = pre_segmented_query; + let p_prefix = prefix; + let p_preset = preset; + let p_prioritize_exact_match = prioritize_exact_match; + let p_prioritize_num_matching_fields = prioritize_num_matching_fields; + let p_prioritize_token_position = prioritize_token_position; + let p_q = q; + let p_query_by = query_by; + let p_query_by_weights = query_by_weights; + let p_remote_embedding_num_tries = remote_embedding_num_tries; + let 
p_remote_embedding_timeout_ms = remote_embedding_timeout_ms; + let p_search_cutoff_ms = search_cutoff_ms; + let p_snippet_threshold = snippet_threshold; + let p_sort_by = sort_by; + let p_split_join_tokens = split_join_tokens; + let p_stopwords = stopwords; + let p_synonym_num_typos = synonym_num_typos; + let p_synonym_prefix = synonym_prefix; + let p_text_match_type = text_match_type; + let p_typo_tokens_threshold = typo_tokens_threshold; + let p_use_cache = use_cache; + let p_vector_query = vector_query; + let p_voice_query = voice_query; + + let uri_str = format!("{}/collections/{collectionName}/documents/search", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); +
+ if let Some(ref param_value) = p_cache_ttl { + req_builder = req_builder.query(&[("cache_ttl", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_conversation { + req_builder = req_builder.query(&[("conversation", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_conversation_id { + req_builder = req_builder.query(&[("conversation_id", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_conversation_model_id { + req_builder = req_builder.query(&[("conversation_model_id", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_drop_tokens_mode { + req_builder = req_builder.query(&[("drop_tokens_mode", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_drop_tokens_threshold { + req_builder = req_builder.query(&[("drop_tokens_threshold", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_enable_highlight_v1 { + req_builder = req_builder.query(&[("enable_highlight_v1", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_enable_overrides { + req_builder = req_builder.query(&[("enable_overrides", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_enable_synonyms { + req_builder = req_builder.query(&[("enable_synonyms", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_enable_typos_for_alpha_numerical_tokens { + req_builder = req_builder.query(&[("enable_typos_for_alpha_numerical_tokens", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_enable_typos_for_numerical_tokens { + req_builder = req_builder.query(&[("enable_typos_for_numerical_tokens", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_exclude_fields { + req_builder = req_builder.query(&[("exclude_fields", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_exhaustive_search { + req_builder = req_builder.query(&[("exhaustive_search", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_facet_by { + req_builder = req_builder.query(&[("facet_by", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_facet_query { + req_builder = req_builder.query(&[("facet_query", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_facet_return_parent { + req_builder = req_builder.query(&[("facet_return_parent", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_facet_strategy { + req_builder = req_builder.query(&[("facet_strategy", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_filter_by { + req_builder = req_builder.query(&[("filter_by", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_filter_curated_hits { + req_builder = req_builder.query(&[("filter_curated_hits", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_group_by { + req_builder = req_builder.query(&[("group_by", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_group_limit { + req_builder = req_builder.query(&[("group_limit", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_group_missing_values { + req_builder = req_builder.query(&[("group_missing_values", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_hidden_hits { + req_builder = req_builder.query(&[("hidden_hits", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_highlight_affix_num_tokens { + req_builder = req_builder.query(&[("highlight_affix_num_tokens", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_highlight_end_tag { + req_builder = req_builder.query(&[("highlight_end_tag", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_highlight_fields { + req_builder = req_builder.query(&[("highlight_fields", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_highlight_full_fields { + req_builder = req_builder.query(&[("highlight_full_fields", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_highlight_start_tag { + req_builder = req_builder.query(&[("highlight_start_tag", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_include_fields { + req_builder = req_builder.query(&[("include_fields", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_infix { + req_builder = req_builder.query(&[("infix", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_limit { + req_builder = req_builder.query(&[("limit", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_max_candidates { + req_builder = req_builder.query(&[("max_candidates", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_max_extra_prefix { + req_builder = req_builder.query(&[("max_extra_prefix", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_max_extra_suffix { + req_builder = req_builder.query(&[("max_extra_suffix", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_max_facet_values { + req_builder = req_builder.query(&[("max_facet_values", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_max_filter_by_candidates { + req_builder = req_builder.query(&[("max_filter_by_candidates", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_min_len_1typo { + req_builder = req_builder.query(&[("min_len_1typo", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_min_len_2typo { + req_builder = req_builder.query(&[("min_len_2typo", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_num_typos { + req_builder = req_builder.query(&[("num_typos", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_offset { + req_builder = req_builder.query(&[("offset", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_override_tags { + req_builder = req_builder.query(&[("override_tags", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_page { + req_builder = req_builder.query(&[("page", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_per_page { + req_builder = req_builder.query(&[("per_page", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_pinned_hits { + req_builder = req_builder.query(&[("pinned_hits", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_pre_segmented_query { + req_builder = req_builder.query(&[("pre_segmented_query", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_prefix { + req_builder = req_builder.query(&[("prefix", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_preset { + req_builder = req_builder.query(&[("preset", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_prioritize_exact_match { + req_builder = req_builder.query(&[("prioritize_exact_match", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_prioritize_num_matching_fields { + req_builder = req_builder.query(&[("prioritize_num_matching_fields", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_prioritize_token_position { + req_builder = req_builder.query(&[("prioritize_token_position", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_q { + req_builder = req_builder.query(&[("q", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_query_by { + req_builder = req_builder.query(&[("query_by", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_query_by_weights { + req_builder = req_builder.query(&[("query_by_weights", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_remote_embedding_num_tries { + req_builder = req_builder.query(&[("remote_embedding_num_tries", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_remote_embedding_timeout_ms { + req_builder = req_builder.query(&[("remote_embedding_timeout_ms", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_search_cutoff_ms { + req_builder = req_builder.query(&[("search_cutoff_ms", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_snippet_threshold { + req_builder = req_builder.query(&[("snippet_threshold", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_sort_by { + req_builder = req_builder.query(&[("sort_by", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_split_join_tokens { + req_builder = req_builder.query(&[("split_join_tokens", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_stopwords { + req_builder = req_builder.query(&[("stopwords", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_synonym_num_typos { + req_builder = req_builder.query(&[("synonym_num_typos", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_synonym_prefix { + req_builder = req_builder.query(&[("synonym_prefix", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_text_match_type { + req_builder = req_builder.query(&[("text_match_type", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_typo_tokens_threshold { + req_builder = req_builder.query(&[("typo_tokens_threshold", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_use_cache { + req_builder = req_builder.query(&[("use_cache", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_vector_query { + req_builder = req_builder.query(&[("vector_query", &param_value.to_string())]); + }
+ if let Some(ref param_value) = p_voice_query { + req_builder = req_builder.query(&[("voice_query", &param_value.to_string())]); + }
+ if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let
local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchResult`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchResult`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Update an individual document from a collection by using its ID. The update can be partial. -pub async fn update_document( - configuration: &configuration::Configuration, - collection_name: &str, - document_id: &str, - body: serde_json::Value, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/documents/{documentId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - documentId = crate::apis::urlencode(document_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::PATCH, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), +pub async fn update_document(configuration: &configuration::Configuration, collection_name: &str, document_id: &str, body: serde_json::Value, dirty_values: Option) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_document_id = document_id; + let p_body = body; + let p_dirty_values = dirty_values; + + let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), documentId=crate::apis::urlencode(p_document_id)); + let mut req_builder = 
configuration.client.request(reqwest::Method::PATCH, &uri_str); + + if let Some(ref param_value) = p_dirty_values { + req_builder = req_builder.query(&[("dirty_values", &param_value.to_string())]); + } + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - local_var_req_builder = local_var_req_builder.json(&body); - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + req_builder = req_builder.json(&p_body); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `serde_json::Value`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `serde_json::Value`")))), + } } else { - let local_var_entity: Option<UpdateDocumentError> = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option<UpdateDocumentError> = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// The filter_by query parameter is used to filter to specify a condition against which the documents are matched. The request body contains the fields that should be updated for any documents that match the filter condition. This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later.
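// A minimal usage sketch (not part of the generated patch) showing how the
// regenerated `update_document` and `update_documents` wrappers above might be
// called. The module path `typesense_codegen::apis::documents_api` and the
// sample collection/filter values are assumptions; only the function
// signatures come from the code above.
use typesense_codegen::apis::{configuration::Configuration, documents_api};

async fn update_examples(config: &Configuration) -> Result<(), Box<dyn std::error::Error>> {
    let patch = serde_json::json!({ "price": 19.99 });

    // Partial update of a single document by id; `dirty_values` is optional, so pass `None`.
    documents_api::update_document(config, "products", "doc-123", patch.clone(), None).await?;

    // Bulk update of every document matching a `filter_by` expression.
    documents_api::update_documents(config, "products", patch, Some("in_stock:=false")).await?;
    Ok(())
}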
-pub async fn update_documents( - configuration: &configuration::Configuration, - collection_name: &str, - body: serde_json::Value, - update_documents_parameters: Option< - crate::models::UpdateDocumentsUpdateDocumentsParametersParameter, - >, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/documents", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::PATCH, local_var_uri_str.as_str()); - - if let Some(ref update_documents_parameters_ref) = update_documents_parameters { - local_var_req_builder = local_var_req_builder.query(&update_documents_parameters_ref); - } - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; - local_var_req_builder = local_var_req_builder.json(&body); +pub async fn update_documents(configuration: &configuration::Configuration, collection_name: &str, body: serde_json::Value, filter_by: Option<&str>) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_body = body; + let p_filter_by = filter_by; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let mut req_builder = configuration.client.request(reqwest::Method::PATCH, &uri_str); - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, + if let Some(ref param_value) = p_filter_by { + req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); + } + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - Err(Error::ResponseError(local_var_error)) + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_body); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let 
content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::UpdateDocuments200Response`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::UpdateDocuments200Response`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. -pub async fn upsert_search_override( - configuration: &configuration::Configuration, - collection_name: &str, - override_id: &str, - search_override_schema: crate::models::SearchOverrideSchema, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/overrides/{overrideId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - overrideId = crate::apis::urlencode(override_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::PUT, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; - local_var_req_builder = local_var_req_builder.json(&search_override_schema); +pub async fn upsert_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str, search_override_schema: models::SearchOverrideSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_override_id = override_id; + let p_search_override_schema = search_override_schema; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = 
ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } -} - -/// Create or update a synonym to define search terms that should be considered equivalent. -pub async fn upsert_search_synonym( - configuration: &configuration::Configuration, - collection_name: &str, - synonym_id: &str, - search_synonym_schema: crate::models::SearchSynonymSchema, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/synonyms/{synonymId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - synonymId = crate::apis::urlencode(synonym_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::PUT, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - local_var_req_builder = local_var_req_builder.json(&search_synonym_schema); - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + req_builder = req_builder.json(&p_search_override_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchOverride`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverride`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: 
local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } + diff --git a/typesense_codegen/src/apis/health_api.rs b/typesense_codegen/src/apis/health_api.rs index d2978fb..18fe2a5 100644 --- a/typesense_codegen/src/apis/health_api.rs +++ b/typesense_codegen/src/apis/health_api.rs @@ -3,13 +3,17 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ -use super::{configuration, Error}; -use crate::apis::ResponseContent; + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + /// struct for typed errors of method [`health`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -18,47 +22,47 @@ pub enum HealthError { UnknownValue(serde_json::Value), } -/// Checks if Typesense server is ready to accept requests. -pub async fn health( - configuration: &configuration::Configuration, -) -> Result> { - let local_var_configuration = configuration; - let local_var_client = &local_var_configuration.client; +/// Checks if Typesense server is ready to accept requests. +pub async fn health(configuration: &configuration::Configuration, ) -> Result> { - let local_var_uri_str = format!("{}/health", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + let uri_str = format!("{}/health", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + if 
!status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::HealthStatus`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::HealthStatus`")))), + } } else { - let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } + diff --git a/typesense_codegen/src/apis/keys_api.rs b/typesense_codegen/src/apis/keys_api.rs index 6b42ab8..fbc0f24 100644 --- a/typesense_codegen/src/apis/keys_api.rs +++ b/typesense_codegen/src/apis/keys_api.rs @@ -3,20 +3,24 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ -use super::{configuration, Error}; -use crate::apis::ResponseContent; + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + /// struct for typed errors of method [`create_key`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum CreateKeyError { - Status400(crate::models::ApiResponse), - Status409(crate::models::ApiResponse), + Status400(models::ApiResponse), + Status409(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -24,8 +28,8 @@ pub enum CreateKeyError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum DeleteKeyError { - Status400(crate::models::ApiResponse), - Status404(crate::models::ApiResponse), + Status400(models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -33,7 +37,7 @@ pub enum DeleteKeyError { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum GetKeyError { - Status404(crate::models::ApiResponse), + Status404(models::ApiResponse), UnknownValue(serde_json::Value), } @@ -44,194 +48,181 @@ pub enum GetKeysError { UnknownValue(serde_json::Value), } -/// Create an API Key with fine-grain access control. You can restrict access on both a per-collection and per-action level. The generated key is returned only during creation. You want to store this key carefully in a secure place. -pub async fn create_key( - configuration: &configuration::Configuration, - api_key_schema: Option, -) -> Result> { - let local_var_configuration = configuration; - let local_var_client = &local_var_configuration.client; +/// Create an API Key with fine-grain access control. You can restrict access on both a per-collection and per-action level. The generated key is returned only during creation. You want to store this key carefully in a secure place. 
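// Sketch of the `Configuration` that every generated function in this patch
// (including `create_key` below) takes as its first argument. The crate path
// and field names mirror the standard openapi-generator reqwest template that
// the request builders above already read (`base_path`, `user_agent`,
// `api_key.key`, `api_key.prefix`); confirm them against `apis/configuration.rs`
// before relying on this.
use typesense_codegen::apis::configuration::{ApiKey, Configuration};
use typesense_codegen::apis::health_api;

async fn ping_typesense() -> Result<(), Box<dyn std::error::Error>> {
    let config = Configuration {
        base_path: "http://localhost:8108".to_owned(),
        // Sent as the `X-TYPESENSE-API-KEY` header by every request builder in this patch.
        api_key: Some(ApiKey { prefix: None, key: "xyz".to_owned() }),
        ..Configuration::default()
    };
    let status = health_api::health(&config).await?;
    println!("health: {status:?}");
    Ok(())
}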
+pub async fn create_key(configuration: &configuration::Configuration, api_key_schema: Option) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_api_key_schema = api_key_schema; - let local_var_uri_str = format!("{}/keys", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + let uri_str = format!("{}/keys", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - local_var_req_builder = local_var_req_builder.json(&api_key_schema); - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + req_builder = req_builder.json(&p_api_key_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ApiKey`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ApiKey`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } -pub async fn delete_key( - configuration: &configuration::Configuration, - key_id: i64, -) -> Result> { - let local_var_configuration = configuration; - - let 
local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/keys/{keyId}", - local_var_configuration.base_path, - keyId = key_id - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn delete_key(configuration: &configuration::Configuration, key_id: i64) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_key_id = key_id; + + let uri_str = format!("{}/keys/{keyId}", configuration.base_path, keyId=p_key_id); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ApiKeyDeleteResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ApiKeyDeleteResponse`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } /// Retrieve (metadata about) a key. Only the key prefix is returned when you retrieve a key. Due to security reasons, only the create endpoint returns the full API key. 
-pub async fn get_key( - configuration: &configuration::Configuration, - key_id: i64, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/keys/{keyId}", - local_var_configuration.base_path, - keyId = key_id - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); +pub async fn get_key(configuration: &configuration::Configuration, key_id: i64) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_key_id = key_id; + + let uri_str = format!("{}/keys/{keyId}", configuration.base_path, keyId=p_key_id); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ApiKey`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ApiKey`")))), + } } else { - let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } -pub async fn get_keys( - configuration: 
&configuration::Configuration, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; +pub async fn get_keys(configuration: &configuration::Configuration, ) -> Result> { - let local_var_uri_str = format!("{}/keys", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + let uri_str = format!("{}/keys", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ApiKeysResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ApiKeysResponse`")))), + } } else { - let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } + diff --git a/typesense_codegen/src/apis/mod.rs b/typesense_codegen/src/apis/mod.rs index 130e195..e9eb817 100644 --- a/typesense_codegen/src/apis/mod.rs +++ b/typesense_codegen/src/apis/mod.rs @@ -16,7 +16,7 @@ pub enum Error { ResponseError(ResponseContent), } 
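// Illustrative error handling for the shared `Error<T>` type from this hunk.
// It only relies on variants and `ResponseContent` fields (`status`, `content`,
// `entity`) that the generated functions in this patch construct; the calling
// code and module paths themselves are hypothetical.
use typesense_codegen::apis::{configuration::Configuration, keys_api, Error};

async fn list_keys_or_report(config: &Configuration) {
    match keys_api::get_keys(config).await {
        Ok(keys) => println!("keys: {keys:?}"),
        Err(Error::ResponseError(resp)) => {
            // Non-2xx responses carry the raw body plus an optional typed entity.
            eprintln!("HTTP {}: {} ({:?})", resp.status, resp.content, resp.entity);
        }
        Err(other) => eprintln!("transport/serde/io error: {other}"),
    }
}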
-impl<T> fmt::Display for Error<T> { +impl <T> fmt::Display for Error<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (module, e) = match self { Error::Reqwest(e) => ("reqwest", e.to_string()), @@ -28,7 +28,7 @@ impl<T> fmt::Display for Error<T> { } } -impl<T: fmt::Debug> error::Error for Error<T> { +impl <T: fmt::Debug> error::Error for Error<T> { fn source(&self) -> Option<&(dyn error::Error + 'static)> { Some(match self { Error::Reqwest(e) => e, @@ -39,19 +39,19 @@ impl<T: fmt::Debug> error::Error for Error<T> { } } -impl<T> From<reqwest::Error> for Error<T> { +impl <T> From<reqwest::Error> for Error<T> { fn from(e: reqwest::Error) -> Self { Error::Reqwest(e) } } -impl<T> From<serde_json::Error> for Error<T> { +impl <T> From<serde_json::Error> for Error<T> { fn from(e: serde_json::Error) -> Self { Error::Serde(e) } } -impl<T> From<std::io::Error> for Error<T> { +impl <T> From<std::io::Error> for Error<T> { fn from(e: std::io::Error) -> Self { Error::Io(e) } @@ -78,10 +78,8 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String value, )); } - } - serde_json::Value::String(s) => { - params.push((format!("{}[{}]", prefix, key), s.clone())) - } + }, + serde_json::Value::String(s) => params.push((format!("{}[{}]", prefix, key), s.clone())), _ => params.push((format!("{}[{}]", prefix, key), value.to_string())), } } @@ -92,14 +90,40 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String unimplemented!("Only objects are supported with style=deepObject") } +/// Internal use only +/// A content type supported by this client. +#[allow(dead_code)] +enum ContentType { + Json, + Text, + Unsupported(String) +} + +impl From<&str> for ContentType { + fn from(content_type: &str) -> Self { + if content_type.starts_with("application") && content_type.contains("json") { + return Self::Json; + } else if content_type.starts_with("text/plain") { + return Self::Text; + } else { + return Self::Unsupported(content_type.to_string()); + } + } +} + pub mod analytics_api; pub mod collections_api; +pub mod conversations_api; +pub mod curation_api; pub mod debug_api; pub mod documents_api; pub mod health_api; pub mod keys_api; pub mod operations_api; pub mod override_api; -pub mod promote_api; +pub mod presets_api; +pub mod stemming_api; +pub mod stopwords_api; +pub mod synonyms_api; pub mod configuration; diff --git a/typesense_codegen/src/apis/operations_api.rs b/typesense_codegen/src/apis/operations_api.rs index f7c2877..3c838b3 100644 --- a/typesense_codegen/src/apis/operations_api.rs +++ b/typesense_codegen/src/apis/operations_api.rs @@ -3,13 +3,38 @@ * * An open source search engine for building delightful search experiences.
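// The `ContentType` helper added to `mod.rs` above is crate-private, so this
// standalone snippet simply mirrors its mapping for illustration: anything
// `application/...json...` is decoded as JSON, `text/plain` is treated as
// text, and every other content type is reported back as unsupported.
fn classify(content_type: &str) -> &'static str {
    if content_type.starts_with("application") && content_type.contains("json") {
        "Json"
    } else if content_type.starts_with("text/plain") {
        "Text"
    } else {
        "Unsupported"
    }
}

fn main() {
    assert_eq!(classify("application/json; charset=utf-8"), "Json");
    assert_eq!(classify("text/plain"), "Text");
    assert_eq!(classify("application/octet-stream"), "Unsupported");
}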
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ -use super::{configuration, Error}; -use crate::apis::ResponseContent; + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + + +/// struct for typed errors of method [`get_schema_changes`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetSchemaChangesError { + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_api_stats`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrieveApiStatsError { + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_metrics`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrieveMetricsError { + UnknownValue(serde_json::Value), +} /// struct for typed errors of method [`take_snapshot`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -25,96 +50,222 @@ pub enum VoteError { UnknownValue(serde_json::Value), } -/// Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. You can then backup the snapshot directory that gets created and later restore it as a data directory, as needed. -pub async fn take_snapshot( - configuration: &configuration::Configuration, - snapshot_path: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!("{}/operations/snapshot", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); - - local_var_req_builder = - local_var_req_builder.query(&[("snapshot_path", &snapshot_path.to_string())]); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + +/// Returns the status of any ongoing schema change operations. If no schema changes are in progress, returns an empty response. 
+pub async fn get_schema_changes(configuration: &configuration::Configuration, ) -> Result, Error> { + + let uri_str = format!("{}/operations/schema_changes", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `Vec<models::SchemaChangeStatus>`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `Vec<models::SchemaChangeStatus>`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve the stats about API endpoints. 
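// Hypothetical call site for the new `get_schema_changes` wrapper above. Per
// its doc comment, the returned `Vec<models::SchemaChangeStatus>` is empty
// when no schema change is in progress; module paths are assumptions.
use typesense_codegen::apis::{configuration::Configuration, operations_api};

async fn poll_schema_changes(config: &Configuration) -> Result<(), Box<dyn std::error::Error>> {
    let changes = operations_api::get_schema_changes(config).await?;
    if changes.is_empty() {
        println!("no schema changes in progress");
    } else {
        println!("{} in-flight schema change(s): {changes:?}", changes.len());
    }
    Ok(())
}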
+pub async fn retrieve_api_stats(configuration: &configuration::Configuration, ) -> Result> { + + let uri_str = format!("{}/stats.json", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - Err(Error::ResponseError(local_var_error)) + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ApiStatsResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ApiStatsResponse`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } -/// Triggers a follower node to initiate the raft voting process, which triggers leader re-election. The follower node that you run this operation against will become the new leader, once this command succeeds. -pub async fn vote( - configuration: &configuration::Configuration, -) -> Result> { - let local_var_configuration = configuration; +/// Retrieve the metrics. 
+pub async fn retrieve_metrics(configuration: &configuration::Configuration, ) -> Result> { - let local_var_client = &local_var_configuration.client; + let uri_str = format!("{}/metrics.json", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; - let local_var_uri_str = format!("{}/operations/vote", local_var_configuration.base_path); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `serde_json::Value`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `serde_json::Value`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), +} + +/// Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. You can then backup the snapshot directory that gets created and later restore it as a data directory, as needed. 
+pub async fn take_snapshot(configuration: &configuration::Configuration, snapshot_path: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_snapshot_path = snapshot_path; + + let uri_str = format!("{}/operations/snapshot", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); + + req_builder = req_builder.query(&[("snapshot_path", &p_snapshot_path.to_string())]); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SuccessStatus`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SuccessStatus`")))), + } } else { - let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Triggers a follower node to initiate the raft voting process, which triggers leader re-election. The follower node that you run this operation against will become the new leader, once this command succeeds. 
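+///
+/// # Example
+///
+/// A hedged sketch: run this against a follower node only, as described above,
+/// and assume `config` already targets that node.
+///
+/// ```no_run
+/// # use typesense_codegen::apis::{configuration::Configuration, operations_api};
+/// # async fn example() {
+/// let config = Configuration::default();
+/// match operations_api::vote(&config).await {
+///     Ok(status) => println!("re-election triggered: {:?}", status),
+///     Err(e) => eprintln!("vote request failed: {e}"),
+/// }
+/// # }
+/// ```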
+pub async fn vote(configuration: &configuration::Configuration, ) -> Result> { + + let uri_str = format!("{}/operations/vote", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - Err(Error::ResponseError(local_var_error)) + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SuccessStatus`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SuccessStatus`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } + diff --git a/typesense_codegen/src/apis/override_api.rs b/typesense_codegen/src/apis/override_api.rs index 069cc4e..fca6378 100644 --- a/typesense_codegen/src/apis/override_api.rs +++ b/typesense_codegen/src/apis/override_api.rs @@ -3,13 +3,17 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ -use super::{configuration, Error}; -use crate::apis::ResponseContent; + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + /// struct for typed errors of method [`get_search_override`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -18,55 +22,50 @@ pub enum GetSearchOverrideError { UnknownValue(serde_json::Value), } -/// Retrieve the details of a search override, given its id. -pub async fn get_search_override( - configuration: &configuration::Configuration, - collection_name: &str, - override_id: &str, -) -> Result> { - let local_var_configuration = configuration; - let local_var_client = &local_var_configuration.client; +/// Retrieve the details of a search override, given its id. 
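+///
+/// # Example
+///
+/// Sketch with placeholder names: `"products"` and `"featured-items"` are
+/// illustrative values, not identifiers from this repository.
+///
+/// ```no_run
+/// # use typesense_codegen::apis::{configuration::Configuration, override_api};
+/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
+/// let config = Configuration::default();
+/// let ovr = override_api::get_search_override(&config, "products", "featured-items").await?;
+/// println!("{:?}", ovr);
+/// # Ok(())
+/// # }
+/// ```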
+pub async fn get_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_override_id = override_id; - let local_var_uri_str = format!( - "{}/collections/{collectionName}/overrides/{overrideId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - overrideId = crate::apis::urlencode(override_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchOverride`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchOverride`")))), + } } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) + let content = resp.text().await?; + let entity: Option = 
serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) } } + diff --git a/typesense_codegen/src/apis/presets_api.rs b/typesense_codegen/src/apis/presets_api.rs new file mode 100644 index 0000000..f2f1eb4 --- /dev/null +++ b/typesense_codegen/src/apis/presets_api.rs @@ -0,0 +1,229 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + + +/// struct for typed errors of method [`delete_preset`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum DeletePresetError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_all_presets`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrieveAllPresetsError { + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_preset`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrievePresetError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`upsert_preset`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum UpsertPresetError { + Status400(models::ApiResponse), + UnknownValue(serde_json::Value), +} + + +/// Permanently deletes a preset, given it's name. +pub async fn delete_preset(configuration: &configuration::Configuration, preset_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_preset_id = preset_id; + + let uri_str = format!("{}/presets/{presetId}", configuration.base_path, presetId=crate::apis::urlencode(p_preset_id)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::PresetDeleteSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::PresetDeleteSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + 
Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve the details of all presets +pub async fn retrieve_all_presets(configuration: &configuration::Configuration, ) -> Result> { + + let uri_str = format!("{}/presets", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::PresetsRetrieveSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::PresetsRetrieveSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve the details of a preset, given it's name. 
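+///
+/// # Example
+///
+/// Illustrative only; `"listing_view"` is a placeholder preset name and the
+/// default `Configuration` is assumed to be pointed at your cluster.
+///
+/// ```no_run
+/// # use typesense_codegen::apis::{configuration::Configuration, presets_api};
+/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
+/// let config = Configuration::default();
+/// let preset = presets_api::retrieve_preset(&config, "listing_view").await?;
+/// println!("{:?}", preset);
+/// # Ok(())
+/// # }
+/// ```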
+pub async fn retrieve_preset(configuration: &configuration::Configuration, preset_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_preset_id = preset_id; + + let uri_str = format!("{}/presets/{presetId}", configuration.base_path, presetId=crate::apis::urlencode(p_preset_id)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::PresetSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::PresetSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Create or update an existing preset. 
+pub async fn upsert_preset(configuration: &configuration::Configuration, preset_id: &str, preset_upsert_schema: models::PresetUpsertSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_preset_id = preset_id; + let p_preset_upsert_schema = preset_upsert_schema; + + let uri_str = format!("{}/presets/{presetId}", configuration.base_path, presetId=crate::apis::urlencode(p_preset_id)); + let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_preset_upsert_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::PresetSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::PresetSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + diff --git a/typesense_codegen/src/apis/promote_api.rs b/typesense_codegen/src/apis/promote_api.rs deleted file mode 100644 index 052d70f..0000000 --- a/typesense_codegen/src/apis/promote_api.rs +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. 
- * - * The version of the OpenAPI document: 0.25.0 - * - * Generated by: https://openapi-generator.tech - */ - -use super::{configuration, Error}; -use crate::apis::ResponseContent; - -/// struct for typed errors of method [`delete_search_override`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum DeleteSearchOverrideError { - Status404(crate::models::ApiResponse), - UnknownValue(serde_json::Value), -} - -/// struct for typed errors of method [`get_search_overrides`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum GetSearchOverridesError { - UnknownValue(serde_json::Value), -} - -/// struct for typed errors of method [`upsert_search_override`] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(untagged)] -pub enum UpsertSearchOverrideError { - Status404(crate::models::ApiResponse), - UnknownValue(serde_json::Value), -} - -pub async fn delete_search_override( - configuration: &configuration::Configuration, - collection_name: &str, - override_id: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/overrides/{overrideId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - overrideId = crate::apis::urlencode(override_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) - } -} - -pub async fn get_search_overrides( - configuration: &configuration::Configuration, - collection_name: &str, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/overrides", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = 
local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) - } -} - -/// Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. -pub async fn upsert_search_override( - configuration: &configuration::Configuration, - collection_name: &str, - override_id: &str, - search_override_schema: crate::models::SearchOverrideSchema, -) -> Result> { - let local_var_configuration = configuration; - - let local_var_client = &local_var_configuration.client; - - let local_var_uri_str = format!( - "{}/collections/{collectionName}/overrides/{overrideId}", - local_var_configuration.base_path, - collectionName = crate::apis::urlencode(collection_name), - overrideId = crate::apis::urlencode(override_id) - ); - let mut local_var_req_builder = - local_var_client.request(reqwest::Method::PUT, local_var_uri_str.as_str()); - - if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { - local_var_req_builder = - local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent); - } - if let Some(ref local_var_apikey) = local_var_configuration.api_key { - let local_var_key = &local_var_apikey.key; - let local_var_value = match local_var_apikey.prefix { - Some(ref local_var_prefix) => format!("{} {}", local_var_prefix, local_var_key), - None => local_var_key.clone(), - }; - local_var_req_builder = - local_var_req_builder.header("X-TYPESENSE-API-KEY", local_var_value); - }; - local_var_req_builder = local_var_req_builder.json(&search_override_schema); - - let local_var_req = local_var_req_builder.build()?; - let local_var_resp = local_var_client.execute(local_var_req).await?; - - let local_var_status = local_var_resp.status(); - let local_var_content = local_var_resp.text().await?; - - if !local_var_status.is_client_error() && !local_var_status.is_server_error() { - serde_json::from_str(&local_var_content).map_err(Error::from) - } else { - let local_var_entity: Option = - serde_json::from_str(&local_var_content).ok(); - let local_var_error = ResponseContent { - status: local_var_status, - content: local_var_content, - entity: local_var_entity, - }; - Err(Error::ResponseError(local_var_error)) - } -} diff --git a/typesense_codegen/src/apis/stemming_api.rs b/typesense_codegen/src/apis/stemming_api.rs new file mode 100644 index 0000000..e3b59ae --- /dev/null +++ b/typesense_codegen/src/apis/stemming_api.rs @@ -0,0 +1,177 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + + +/// struct for typed errors of method [`get_stemming_dictionary`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetStemmingDictionaryError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`import_stemming_dictionary`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ImportStemmingDictionaryError { + Status400(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`list_stemming_dictionaries`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ListStemmingDictionariesError { + UnknownValue(serde_json::Value), +} + + +/// Fetch details of a specific stemming dictionary. +pub async fn get_stemming_dictionary(configuration: &configuration::Configuration, dictionary_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_dictionary_id = dictionary_id; + + let uri_str = format!("{}/stemming/dictionaries/{dictionaryId}", configuration.base_path, dictionaryId=crate::apis::urlencode(p_dictionary_id)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::StemmingDictionary`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::StemmingDictionary`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Upload a JSONL file containing word mappings to create or update a stemming dictionary. 
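+///
+/// # Example
+///
+/// Sketch under assumptions: the dictionary id is a placeholder, and the JSONL
+/// body shape (`word`/`root` pairs) is assumed to mirror the
+/// `StemmingDictionaryWordsInner` model; adapt it to your own data.
+///
+/// ```no_run
+/// # use typesense_codegen::apis::{configuration::Configuration, stemming_api};
+/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
+/// let config = Configuration::default();
+/// let body = r#"{"word": "running", "root": "run"}
+/// {"word": "ran", "root": "run"}"#;
+/// let raw = stemming_api::import_stemming_dictionary(&config, "irregular-verbs", body).await?;
+/// println!("{raw}");
+/// # Ok(())
+/// # }
+/// ```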
+pub async fn import_stemming_dictionary(configuration: &configuration::Configuration, id: &str, body: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_id = id; + let p_body = body; + + let uri_str = format!("{}/stemming/dictionaries/import", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); + + req_builder = req_builder.query(&[("id", &p_id.to_string())]); + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_body); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `String`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `String`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve a list of all available stemming dictionaries. 
+pub async fn list_stemming_dictionaries(configuration: &configuration::Configuration, ) -> Result> { + + let uri_str = format!("{}/stemming/dictionaries", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::ListStemmingDictionaries200Response`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::ListStemmingDictionaries200Response`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + diff --git a/typesense_codegen/src/apis/stopwords_api.rs b/typesense_codegen/src/apis/stopwords_api.rs new file mode 100644 index 0000000..8889f3f --- /dev/null +++ b/typesense_codegen/src/apis/stopwords_api.rs @@ -0,0 +1,229 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + + +/// struct for typed errors of method [`delete_stopwords_set`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum DeleteStopwordsSetError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_stopwords_set`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrieveStopwordsSetError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_stopwords_sets`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrieveStopwordsSetsError { + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`upsert_stopwords_set`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum UpsertStopwordsSetError { + Status400(models::ApiResponse), + UnknownValue(serde_json::Value), +} + + +/// Permanently deletes a stopwords set, given it's name. 
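+///
+/// # Example
+///
+/// Sketch only; `"common-words"` is a placeholder set name, and the call is
+/// destructive, so try it against a disposable set first.
+///
+/// ```no_run
+/// # use typesense_codegen::apis::{configuration::Configuration, stopwords_api};
+/// # async fn example() {
+/// let config = Configuration::default();
+/// match stopwords_api::delete_stopwords_set(&config, "common-words").await {
+///     Ok(deleted) => println!("deleted: {:?}", deleted),
+///     Err(e) => eprintln!("delete failed: {e}"),
+/// }
+/// # }
+/// ```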
+pub async fn delete_stopwords_set(configuration: &configuration::Configuration, set_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_set_id = set_id; + + let uri_str = format!("{}/stopwords/{setId}", configuration.base_path, setId=crate::apis::urlencode(p_set_id)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::DeleteStopwordsSet200Response`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::DeleteStopwordsSet200Response`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve the details of a stopwords set, given it's name. 
+pub async fn retrieve_stopwords_set(configuration: &configuration::Configuration, set_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_set_id = set_id; + + let uri_str = format!("{}/stopwords/{setId}", configuration.base_path, setId=crate::apis::urlencode(p_set_id)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::StopwordsSetRetrieveSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::StopwordsSetRetrieveSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve the details of all stopwords sets +pub async fn retrieve_stopwords_sets(configuration: &configuration::Configuration, ) -> Result> { + + let uri_str = format!("{}/stopwords", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::StopwordsSetsRetrieveAllSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to 
`models::StopwordsSetsRetrieveAllSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. +pub async fn upsert_stopwords_set(configuration: &configuration::Configuration, set_id: &str, stopwords_set_upsert_schema: models::StopwordsSetUpsertSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_set_id = set_id; + let p_stopwords_set_upsert_schema = stopwords_set_upsert_schema; + + let uri_str = format!("{}/stopwords/{setId}", configuration.base_path, setId=crate::apis::urlencode(p_set_id)); + let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_stopwords_set_upsert_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::StopwordsSetSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::StopwordsSetSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + diff --git a/typesense_codegen/src/apis/synonyms_api.rs b/typesense_codegen/src/apis/synonyms_api.rs new file mode 100644 index 0000000..39b3130 --- /dev/null +++ b/typesense_codegen/src/apis/synonyms_api.rs @@ -0,0 +1,233 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + + +/// struct for typed errors of method [`delete_search_synonym`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum DeleteSearchSynonymError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`get_search_synonym`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetSearchSynonymError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`get_search_synonyms`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetSearchSynonymsError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`upsert_search_synonym`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum UpsertSearchSynonymError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + + +pub async fn delete_search_synonym(configuration: &configuration::Configuration, collection_name: &str, synonym_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_synonym_id = synonym_id; + + let uri_str = format!("{}/collections/{collectionName}/synonyms/{synonymId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), synonymId=crate::apis::urlencode(p_synonym_id)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchSynonymDeleteResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchSynonymDeleteResponse`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve the details of a search synonym, given its id. 
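+///
+/// # Example
+///
+/// Illustrative sketch; `"products"` and `"coat-synonyms"` are placeholder
+/// identifiers.
+///
+/// ```no_run
+/// # use typesense_codegen::apis::{configuration::Configuration, synonyms_api};
+/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
+/// let config = Configuration::default();
+/// let synonym = synonyms_api::get_search_synonym(&config, "products", "coat-synonyms").await?;
+/// println!("{:?}", synonym);
+/// # Ok(())
+/// # }
+/// ```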
+pub async fn get_search_synonym(configuration: &configuration::Configuration, collection_name: &str, synonym_id: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_synonym_id = synonym_id; + + let uri_str = format!("{}/collections/{collectionName}/synonyms/{synonymId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), synonymId=crate::apis::urlencode(p_synonym_id)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchSynonym`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchSynonym`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +pub async fn get_search_synonyms(configuration: &configuration::Configuration, collection_name: &str) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + + let uri_str = format!("{}/collections/{collectionName}/synonyms", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return 
Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchSynonymsResponse`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchSynonymsResponse`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Create or update a synonym to define search terms that should be considered equivalent. +pub async fn upsert_search_synonym(configuration: &configuration::Configuration, collection_name: &str, synonym_id: &str, search_synonym_schema: models::SearchSynonymSchema) -> Result> { + // add a prefix to parameters to efficiently prevent name collisions + let p_collection_name = collection_name; + let p_synonym_id = synonym_id; + let p_search_synonym_schema = search_synonym_schema; + + let uri_str = format!("{}/collections/{collectionName}/synonyms/{synonymId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), synonymId=crate::apis::urlencode(p_synonym_id)); + let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(&p_search_synonym_schema); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::SearchSynonym`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::SearchSynonym`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + diff --git a/typesense_codegen/src/lib.rs b/typesense_codegen/src/lib.rs index 4c8b05d..e152062 100644 --- a/typesense_codegen/src/lib.rs +++ b/typesense_codegen/src/lib.rs @@ -1,5 +1,11 @@ -#[macro_use] -extern crate serde_derive; +#![allow(unused_imports)] +#![allow(clippy::too_many_arguments)] + +extern crate serde_repr; +extern crate serde; +extern crate serde_json; +extern crate url; +extern crate reqwest; pub mod apis; pub mod models; diff --git a/typesense_codegen/src/models/analytics_event_create_response.rs b/typesense_codegen/src/models/analytics_event_create_response.rs new file mode 100644 index 0000000..f3f37ca --- 
/dev/null +++ b/typesense_codegen/src/models/analytics_event_create_response.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct AnalyticsEventCreateResponse { + #[serde(rename = "ok")] + pub ok: bool, +} + +impl AnalyticsEventCreateResponse { + pub fn new(ok: bool) -> AnalyticsEventCreateResponse { + AnalyticsEventCreateResponse { + ok, + } + } +} + diff --git a/typesense_codegen/src/models/analytics_event_create_schema.rs b/typesense_codegen/src/models/analytics_event_create_schema.rs new file mode 100644 index 0000000..335ee31 --- /dev/null +++ b/typesense_codegen/src/models/analytics_event_create_schema.rs @@ -0,0 +1,33 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct AnalyticsEventCreateSchema { + #[serde(rename = "data")] + pub data: serde_json::Value, + #[serde(rename = "name")] + pub name: String, + #[serde(rename = "type")] + pub r#type: String, +} + +impl AnalyticsEventCreateSchema { + pub fn new(data: serde_json::Value, name: String, r#type: String) -> AnalyticsEventCreateSchema { + AnalyticsEventCreateSchema { + data, + name, + r#type, + } + } +} + diff --git a/typesense_codegen/src/models/analytics_rule_delete_response.rs b/typesense_codegen/src/models/analytics_rule_delete_response.rs new file mode 100644 index 0000000..db39d65 --- /dev/null +++ b/typesense_codegen/src/models/analytics_rule_delete_response.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct AnalyticsRuleDeleteResponse { + #[serde(rename = "name")] + pub name: String, +} + +impl AnalyticsRuleDeleteResponse { + pub fn new(name: String) -> AnalyticsRuleDeleteResponse { + AnalyticsRuleDeleteResponse { + name, + } + } +} + diff --git a/typesense_codegen/src/models/analytics_rule_parameters.rs b/typesense_codegen/src/models/analytics_rule_parameters.rs index 32b4de4..c46ff8b 100644 --- a/typesense_codegen/src/models/analytics_rule_parameters.rs +++ b/typesense_codegen/src/models/analytics_rule_parameters.rs @@ -3,31 +3,34 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsRuleParameters { - #[serde(rename = "source")] - pub source: Box, #[serde(rename = "destination")] - pub destination: Box, - #[serde(rename = "limit")] - pub limit: i32, + pub destination: Box, + #[serde(rename = "expand_query", skip_serializing_if = "Option::is_none")] + pub expand_query: Option, + #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] + pub limit: Option, + #[serde(rename = "source")] + pub source: Box, } impl AnalyticsRuleParameters { - pub fn new( - source: crate::models::AnalyticsRuleParametersSource, - destination: crate::models::AnalyticsRuleParametersDestination, - limit: i32, - ) -> AnalyticsRuleParameters { + pub fn new(destination: models::AnalyticsRuleParametersDestination, source: models::AnalyticsRuleParametersSource) -> AnalyticsRuleParameters { AnalyticsRuleParameters { - source: Box::new(source), destination: Box::new(destination), - limit, + expand_query: None, + limit: None, + source: Box::new(source), } } } + diff --git a/typesense_codegen/src/models/analytics_rule_parameters_destination.rs b/typesense_codegen/src/models/analytics_rule_parameters_destination.rs index b713af8..73d40ed 100644 --- a/typesense_codegen/src/models/analytics_rule_parameters_destination.rs +++ b/typesense_codegen/src/models/analytics_rule_parameters_destination.rs @@ -3,19 +3,28 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsRuleParametersDestination { - #[serde(rename = "collection", skip_serializing_if = "Option::is_none")] - pub collection: Option, + #[serde(rename = "collection")] + pub collection: String, + #[serde(rename = "counter_field", skip_serializing_if = "Option::is_none")] + pub counter_field: Option, } impl AnalyticsRuleParametersDestination { - pub fn new() -> AnalyticsRuleParametersDestination { - AnalyticsRuleParametersDestination { collection: None } + pub fn new(collection: String) -> AnalyticsRuleParametersDestination { + AnalyticsRuleParametersDestination { + collection, + counter_field: None, + } } } + diff --git a/typesense_codegen/src/models/analytics_rule_parameters_source.rs b/typesense_codegen/src/models/analytics_rule_parameters_source.rs index ae055fa..01a3c53 100644 --- a/typesense_codegen/src/models/analytics_rule_parameters_source.rs +++ b/typesense_codegen/src/models/analytics_rule_parameters_source.rs @@ -3,19 +3,28 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsRuleParametersSource { - #[serde(rename = "collections", skip_serializing_if = "Option::is_none")] - pub collections: Option>, + #[serde(rename = "collections")] + pub collections: Vec, + #[serde(rename = "events", skip_serializing_if = "Option::is_none")] + pub events: Option>, } impl AnalyticsRuleParametersSource { - pub fn new() -> AnalyticsRuleParametersSource { - AnalyticsRuleParametersSource { collections: None } + pub fn new(collections: Vec) -> AnalyticsRuleParametersSource { + AnalyticsRuleParametersSource { + collections, + events: None, + } } } + diff --git a/typesense_codegen/src/models/analytics_rule_parameters_source_events_inner.rs b/typesense_codegen/src/models/analytics_rule_parameters_source_events_inner.rs new file mode 100644 index 0000000..da0a8ae --- /dev/null +++ b/typesense_codegen/src/models/analytics_rule_parameters_source_events_inner.rs @@ -0,0 +1,33 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct AnalyticsRuleParametersSourceEventsInner { + #[serde(rename = "name")] + pub name: String, + #[serde(rename = "type")] + pub r#type: String, + #[serde(rename = "weight")] + pub weight: f32, +} + +impl AnalyticsRuleParametersSourceEventsInner { + pub fn new(name: String, r#type: String, weight: f32) -> AnalyticsRuleParametersSourceEventsInner { + AnalyticsRuleParametersSourceEventsInner { + name, + r#type, + weight, + } + } +} + diff --git a/typesense_codegen/src/models/analytics_rule_schema.rs b/typesense_codegen/src/models/analytics_rule_schema.rs index 4d9c266..e1e1cd2 100644 --- a/typesense_codegen/src/models/analytics_rule_schema.rs +++ b/typesense_codegen/src/models/analytics_rule_schema.rs @@ -3,31 +3,47 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsRuleSchema { + #[serde(rename = "params")] + pub params: Box, + #[serde(rename = "type")] + pub r#type: Type, #[serde(rename = "name")] pub name: String, - #[serde(rename = "type")] - pub r#type: String, - #[serde(rename = "params")] - pub params: Box, } impl AnalyticsRuleSchema { - pub fn new( - name: String, - r#type: String, - params: crate::models::AnalyticsRuleParameters, - ) -> AnalyticsRuleSchema { + pub fn new(params: models::AnalyticsRuleParameters, r#type: Type, name: String) -> AnalyticsRuleSchema { AnalyticsRuleSchema { - name, - r#type, params: Box::new(params), + r#type, + name, } } } +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum Type { + #[serde(rename = "popular_queries")] + PopularQueries, + #[serde(rename = "nohits_queries")] + NohitsQueries, + #[serde(rename = "counter")] + Counter, +} + +impl Default for Type { + fn default() -> Type { + Self::PopularQueries + } +} + diff --git a/typesense_codegen/src/models/analytics_rule_upsert_schema.rs b/typesense_codegen/src/models/analytics_rule_upsert_schema.rs new file mode 100644 index 0000000..ecd1f80 --- /dev/null +++ b/typesense_codegen/src/models/analytics_rule_upsert_schema.rs @@ -0,0 +1,46 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct AnalyticsRuleUpsertSchema { + #[serde(rename = "params")] + pub params: Box, + #[serde(rename = "type")] + pub r#type: Type, +} + +impl AnalyticsRuleUpsertSchema { + pub fn new(params: models::AnalyticsRuleParameters, r#type: Type) -> AnalyticsRuleUpsertSchema { + AnalyticsRuleUpsertSchema { + params: Box::new(params), + r#type, + } + } +} +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum Type { + #[serde(rename = "popular_queries")] + PopularQueries, + #[serde(rename = "nohits_queries")] + NohitsQueries, + #[serde(rename = "counter")] + Counter, +} + +impl Default for Type { + fn default() -> Type { + Self::PopularQueries + } +} + diff --git a/typesense_codegen/src/models/analytics_rules_retrieve_schema.rs b/typesense_codegen/src/models/analytics_rules_retrieve_schema.rs index ed32555..f55a63c 100644 --- a/typesense_codegen/src/models/analytics_rules_retrieve_schema.rs +++ b/typesense_codegen/src/models/analytics_rules_retrieve_schema.rs @@ -3,19 +3,25 @@ * * An open source search engine for building delightful search experiences. 
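// A sketch of the new AnalyticsRuleUpsertSchema above (not part of the diff): a popular_queries
// rule that aggregates queries from `products` into `product_queries`. Names are placeholders.
use typesense_codegen::models::{
    analytics_rule_upsert_schema, AnalyticsRuleParameters, AnalyticsRuleParametersDestination,
    AnalyticsRuleParametersSource, AnalyticsRuleUpsertSchema,
};

fn popular_queries_rule() -> AnalyticsRuleUpsertSchema {
    // `new(destination, source)` leaves the optional `limit` and `expand_query` unset.
    let mut params = AnalyticsRuleParameters::new(
        AnalyticsRuleParametersDestination::new("product_queries".to_string()),
        AnalyticsRuleParametersSource::new(vec!["products".to_string()]),
    );
    params.limit = Some(1000);

    AnalyticsRuleUpsertSchema::new(params, analytics_rule_upsert_schema::Type::PopularQueries)
}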
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsRulesRetrieveSchema { #[serde(rename = "rules", skip_serializing_if = "Option::is_none")] - pub rules: Option>, + pub rules: Option>, } impl AnalyticsRulesRetrieveSchema { pub fn new() -> AnalyticsRulesRetrieveSchema { - AnalyticsRulesRetrieveSchema { rules: None } + AnalyticsRulesRetrieveSchema { + rules: None, + } } } + diff --git a/typesense_codegen/src/models/api_key.rs b/typesense_codegen/src/models/api_key.rs index 7de6d81..a4a771d 100644 --- a/typesense_codegen/src/models/api_key.rs +++ b/typesense_codegen/src/models/api_key.rs @@ -3,23 +3,26 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ApiKey { - #[serde(rename = "value", skip_serializing_if = "Option::is_none")] - pub value: Option, - #[serde(rename = "description")] - pub description: String, #[serde(rename = "actions")] pub actions: Vec, #[serde(rename = "collections")] pub collections: Vec, + #[serde(rename = "description")] + pub description: String, #[serde(rename = "expires_at", skip_serializing_if = "Option::is_none")] pub expires_at: Option, + #[serde(rename = "value", skip_serializing_if = "Option::is_none")] + pub value: Option, #[serde(rename = "id", skip_serializing_if = "Option::is_none")] pub id: Option, #[serde(rename = "value_prefix", skip_serializing_if = "Option::is_none")] @@ -27,15 +30,16 @@ pub struct ApiKey { } impl ApiKey { - pub fn new(description: String, actions: Vec, collections: Vec) -> ApiKey { + pub fn new(actions: Vec, collections: Vec, description: String) -> ApiKey { ApiKey { - value: None, - description, actions, collections, + description, expires_at: None, + value: None, id: None, value_prefix: None, } } } + diff --git a/typesense_codegen/src/models/api_key_delete_response.rs b/typesense_codegen/src/models/api_key_delete_response.rs new file mode 100644 index 0000000..8eb776a --- /dev/null +++ b/typesense_codegen/src/models/api_key_delete_response.rs @@ -0,0 +1,28 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ApiKeyDeleteResponse { + /// The id of the API key that was deleted + #[serde(rename = "id")] + pub id: i64, +} + +impl ApiKeyDeleteResponse { + pub fn new(id: i64) -> ApiKeyDeleteResponse { + ApiKeyDeleteResponse { + id, + } + } +} + diff --git a/typesense_codegen/src/models/api_key_schema.rs b/typesense_codegen/src/models/api_key_schema.rs index 2f6cb2b..fcddd8b 100644 --- a/typesense_codegen/src/models/api_key_schema.rs +++ b/typesense_codegen/src/models/api_key_schema.rs @@ -3,37 +3,37 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ApiKeySchema { - #[serde(rename = "value", skip_serializing_if = "Option::is_none")] - pub value: Option, - #[serde(rename = "description")] - pub description: String, #[serde(rename = "actions")] pub actions: Vec, #[serde(rename = "collections")] pub collections: Vec, + #[serde(rename = "description")] + pub description: String, #[serde(rename = "expires_at", skip_serializing_if = "Option::is_none")] pub expires_at: Option, + #[serde(rename = "value", skip_serializing_if = "Option::is_none")] + pub value: Option, } impl ApiKeySchema { - pub fn new( - description: String, - actions: Vec, - collections: Vec, - ) -> ApiKeySchema { + pub fn new(actions: Vec, collections: Vec, description: String) -> ApiKeySchema { ApiKeySchema { - value: None, - description, actions, collections, + description, expires_at: None, + value: None, } } } + diff --git a/typesense_codegen/src/models/api_keys_response.rs b/typesense_codegen/src/models/api_keys_response.rs index f523a98..71b3d28 100644 --- a/typesense_codegen/src/models/api_keys_response.rs +++ b/typesense_codegen/src/models/api_keys_response.rs @@ -3,19 +3,25 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ApiKeysResponse { #[serde(rename = "keys")] - pub keys: Vec, + pub keys: Vec, } impl ApiKeysResponse { - pub fn new(keys: Vec) -> ApiKeysResponse { - ApiKeysResponse { keys } + pub fn new(keys: Vec) -> ApiKeysResponse { + ApiKeysResponse { + keys, + } } } + diff --git a/typesense_codegen/src/models/api_response.rs b/typesense_codegen/src/models/api_response.rs index 691ea49..42e90ba 100644 --- a/typesense_codegen/src/models/api_response.rs +++ b/typesense_codegen/src/models/api_response.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ApiResponse { #[serde(rename = "message")] @@ -16,6 +19,9 @@ pub struct ApiResponse { impl ApiResponse { pub fn new(message: String) -> ApiResponse { - ApiResponse { message } + ApiResponse { + message, + } } } + diff --git a/typesense_codegen/src/models/api_stats_response.rs b/typesense_codegen/src/models/api_stats_response.rs new file mode 100644 index 0000000..9daf9bb --- /dev/null +++ b/typesense_codegen/src/models/api_stats_response.rs @@ -0,0 +1,63 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
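// A sketch of the reordered ApiKeySchema constructor above (not part of the diff): it now takes
// (actions, collections, description). The key scope below is a placeholder.
use typesense_codegen::models::ApiKeySchema;

fn search_only_key() -> ApiKeySchema {
    let mut key = ApiKeySchema::new(
        vec!["documents:search".to_string()],
        vec!["products".to_string()],
        "Search-only key for the storefront".to_string(),
    );
    key.expires_at = Some(1893456000); // optional Unix epoch expiry
    key
}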
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ApiStatsResponse { + #[serde(rename = "delete_latency_ms", skip_serializing_if = "Option::is_none")] + pub delete_latency_ms: Option, + #[serde(rename = "delete_requests_per_second", skip_serializing_if = "Option::is_none")] + pub delete_requests_per_second: Option, + #[serde(rename = "import_latency_ms", skip_serializing_if = "Option::is_none")] + pub import_latency_ms: Option, + #[serde(rename = "import_requests_per_second", skip_serializing_if = "Option::is_none")] + pub import_requests_per_second: Option, + #[serde(rename = "latency_ms", skip_serializing_if = "Option::is_none")] + pub latency_ms: Option, + #[serde(rename = "overloaded_requests_per_second", skip_serializing_if = "Option::is_none")] + pub overloaded_requests_per_second: Option, + #[serde(rename = "pending_write_batches", skip_serializing_if = "Option::is_none")] + pub pending_write_batches: Option, + #[serde(rename = "requests_per_second", skip_serializing_if = "Option::is_none")] + pub requests_per_second: Option, + #[serde(rename = "search_latency_ms", skip_serializing_if = "Option::is_none")] + pub search_latency_ms: Option, + #[serde(rename = "search_requests_per_second", skip_serializing_if = "Option::is_none")] + pub search_requests_per_second: Option, + #[serde(rename = "total_requests_per_second", skip_serializing_if = "Option::is_none")] + pub total_requests_per_second: Option, + #[serde(rename = "write_latency_ms", skip_serializing_if = "Option::is_none")] + pub write_latency_ms: Option, + #[serde(rename = "write_requests_per_second", skip_serializing_if = "Option::is_none")] + pub write_requests_per_second: Option, +} + +impl ApiStatsResponse { + pub fn new() -> ApiStatsResponse { + ApiStatsResponse { + delete_latency_ms: None, + delete_requests_per_second: None, + import_latency_ms: None, + import_requests_per_second: None, + latency_ms: None, + overloaded_requests_per_second: None, + pending_write_batches: None, + requests_per_second: None, + search_latency_ms: None, + search_requests_per_second: None, + total_requests_per_second: None, + write_latency_ms: None, + write_requests_per_second: None, + } + } +} + diff --git a/typesense_codegen/src/models/collection_alias.rs b/typesense_codegen/src/models/collection_alias.rs index b5ab76e..2c92dfa 100644 --- a/typesense_codegen/src/models/collection_alias.rs +++ b/typesense_codegen/src/models/collection_alias.rs @@ -3,26 +3,30 @@ * * An open source search engine for building delightful search experiences. 
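// A sketch of how the optional stats fields above behave under serde (not part of the diff):
// every `Option` field carries `skip_serializing_if`, so unset metrics are simply omitted.
use typesense_codegen::models::ApiStatsResponse;

fn stats_round_trip() -> serde_json::Result<()> {
    // An empty response serializes to `{}` rather than a block of nulls.
    assert_eq!(serde_json::to_string(&ApiStatsResponse::new())?, "{}");

    let parsed: ApiStatsResponse =
        serde_json::from_str(r#"{"search_latency_ms": 1.5, "search_requests_per_second": 20.0}"#)?;
    assert_eq!(parsed.search_latency_ms, Some(1.5));
    Ok(())
}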
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CollectionAlias { - /// Name of the collection alias - #[serde(rename = "name")] - pub name: String, /// Name of the collection the alias mapped to #[serde(rename = "collection_name")] pub collection_name: String, + /// Name of the collection alias + #[serde(rename = "name")] + pub name: String, } impl CollectionAlias { - pub fn new(name: String, collection_name: String) -> CollectionAlias { + pub fn new(collection_name: String, name: String) -> CollectionAlias { CollectionAlias { - name, collection_name, + name, } } } + diff --git a/typesense_codegen/src/models/collection_alias_schema.rs b/typesense_codegen/src/models/collection_alias_schema.rs index 279e88f..5c3c15f 100644 --- a/typesense_codegen/src/models/collection_alias_schema.rs +++ b/typesense_codegen/src/models/collection_alias_schema.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CollectionAliasSchema { /// Name of the collection you wish to map the alias to @@ -17,6 +20,9 @@ pub struct CollectionAliasSchema { impl CollectionAliasSchema { pub fn new(collection_name: String) -> CollectionAliasSchema { - CollectionAliasSchema { collection_name } + CollectionAliasSchema { + collection_name, + } } } + diff --git a/typesense_codegen/src/models/collection_aliases_response.rs b/typesense_codegen/src/models/collection_aliases_response.rs index 2bc9560..8529794 100644 --- a/typesense_codegen/src/models/collection_aliases_response.rs +++ b/typesense_codegen/src/models/collection_aliases_response.rs @@ -3,19 +3,25 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CollectionAliasesResponse { #[serde(rename = "aliases")] - pub aliases: Vec, + pub aliases: Vec, } impl CollectionAliasesResponse { - pub fn new(aliases: Vec) -> CollectionAliasesResponse { - CollectionAliasesResponse { aliases } + pub fn new(aliases: Vec) -> CollectionAliasesResponse { + CollectionAliasesResponse { + aliases, + } } } + diff --git a/typesense_codegen/src/models/collection_response.rs b/typesense_codegen/src/models/collection_response.rs index 8676b81..fcd3cb1 100644 --- a/typesense_codegen/src/models/collection_response.rs +++ b/typesense_codegen/src/models/collection_response.rs @@ -3,61 +3,57 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CollectionResponse { - /// Name of the collection - #[serde(rename = "name")] - pub name: String, - /// A list of fields for querying, filtering and faceting - #[serde(rename = "fields")] - pub fields: Vec, /// The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. - #[serde( - rename = "default_sorting_field", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "default_sorting_field", skip_serializing_if = "Option::is_none")] pub default_sorting_field: Option, - /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. - #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] - pub token_separators: Option>, /// Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. - #[serde( - rename = "enable_nested_fields", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "enable_nested_fields", skip_serializing_if = "Option::is_none")] pub enable_nested_fields: Option, - /// List of symbols or special characters to be indexed. + /// A list of fields for querying, filtering and faceting + #[serde(rename = "fields")] + pub fields: Vec, + /// Name of the collection + #[serde(rename = "name")] + pub name: String, + /// List of symbols or special characters to be indexed. #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] pub symbols_to_index: Option>, - /// Number of documents in the collection - #[serde(rename = "num_documents")] - pub num_documents: i64, + /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. 
+ #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] + pub token_separators: Option>, + #[serde(rename = "voice_query_model", skip_serializing_if = "Option::is_none")] + pub voice_query_model: Option>, /// Timestamp of when the collection was created (Unix epoch in seconds) #[serde(rename = "created_at")] pub created_at: i64, + /// Number of documents in the collection + #[serde(rename = "num_documents")] + pub num_documents: i64, } impl CollectionResponse { - pub fn new( - name: String, - fields: Vec, - num_documents: i64, - created_at: i64, - ) -> CollectionResponse { + pub fn new(fields: Vec, name: String, created_at: i64, num_documents: i64) -> CollectionResponse { CollectionResponse { - name, - fields, default_sorting_field: None, - token_separators: None, enable_nested_fields: None, + fields, + name, symbols_to_index: None, - num_documents, + token_separators: None, + voice_query_model: None, created_at, + num_documents, } } } + diff --git a/typesense_codegen/src/models/collection_schema.rs b/typesense_codegen/src/models/collection_schema.rs index 5a373f6..544aff9 100644 --- a/typesense_codegen/src/models/collection_schema.rs +++ b/typesense_codegen/src/models/collection_schema.rs @@ -3,48 +3,49 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CollectionSchema { - /// Name of the collection - #[serde(rename = "name")] - pub name: String, - /// A list of fields for querying, filtering and faceting - #[serde(rename = "fields")] - pub fields: Vec, /// The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. - #[serde( - rename = "default_sorting_field", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "default_sorting_field", skip_serializing_if = "Option::is_none")] pub default_sorting_field: Option, - /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. - #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] - pub token_separators: Option>, /// Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. - #[serde( - rename = "enable_nested_fields", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "enable_nested_fields", skip_serializing_if = "Option::is_none")] pub enable_nested_fields: Option, - /// List of symbols or special characters to be indexed. + /// A list of fields for querying, filtering and faceting + #[serde(rename = "fields")] + pub fields: Vec, + /// Name of the collection + #[serde(rename = "name")] + pub name: String, + /// List of symbols or special characters to be indexed. #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] pub symbols_to_index: Option>, + /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. 
+ #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] + pub token_separators: Option>, + #[serde(rename = "voice_query_model", skip_serializing_if = "Option::is_none")] + pub voice_query_model: Option>, } impl CollectionSchema { - pub fn new(name: String, fields: Vec) -> CollectionSchema { + pub fn new(fields: Vec, name: String) -> CollectionSchema { CollectionSchema { - name, - fields, default_sorting_field: None, - token_separators: None, enable_nested_fields: None, + fields, + name, symbols_to_index: None, + token_separators: None, + voice_query_model: None, } } } + diff --git a/typesense_codegen/src/models/collection_update_schema.rs b/typesense_codegen/src/models/collection_update_schema.rs index f4a4078..672fe56 100644 --- a/typesense_codegen/src/models/collection_update_schema.rs +++ b/typesense_codegen/src/models/collection_update_schema.rs @@ -3,20 +3,26 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CollectionUpdateSchema { /// A list of fields for querying, filtering and faceting #[serde(rename = "fields")] - pub fields: Vec, + pub fields: Vec, } impl CollectionUpdateSchema { - pub fn new(fields: Vec) -> CollectionUpdateSchema { - CollectionUpdateSchema { fields } + pub fn new(fields: Vec) -> CollectionUpdateSchema { + CollectionUpdateSchema { + fields, + } } } + diff --git a/typesense_codegen/src/models/conversation_model_create_schema.rs b/typesense_codegen/src/models/conversation_model_create_schema.rs new file mode 100644 index 0000000..2442897 --- /dev/null +++ b/typesense_codegen/src/models/conversation_model_create_schema.rs @@ -0,0 +1,60 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ConversationModelCreateSchema { + /// LLM service's account ID (only applicable for Cloudflare) + #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] + pub account_id: Option, + /// The LLM service's API Key + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, + /// Typesense collection that stores the historical conversations + #[serde(rename = "history_collection")] + pub history_collection: String, + /// An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. + #[serde(rename = "id", skip_serializing_if = "Option::is_none")] + pub id: Option, + /// The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. 
+ #[serde(rename = "max_bytes")] + pub max_bytes: i32, + /// Name of the LLM model offered by OpenAI, Cloudflare or vLLM + #[serde(rename = "model_name")] + pub model_name: String, + /// The system prompt that contains special instructions to the LLM + #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] + pub system_prompt: Option, + /// Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) + #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] + pub ttl: Option, + /// URL of vLLM service + #[serde(rename = "vllm_url", skip_serializing_if = "Option::is_none")] + pub vllm_url: Option, +} + +impl ConversationModelCreateSchema { + pub fn new(history_collection: String, max_bytes: i32, model_name: String) -> ConversationModelCreateSchema { + ConversationModelCreateSchema { + account_id: None, + api_key: None, + history_collection, + id: None, + max_bytes, + model_name, + system_prompt: None, + ttl: None, + vllm_url: None, + } + } +} + diff --git a/typesense_codegen/src/models/conversation_model_schema.rs b/typesense_codegen/src/models/conversation_model_schema.rs new file mode 100644 index 0000000..5d004f2 --- /dev/null +++ b/typesense_codegen/src/models/conversation_model_schema.rs @@ -0,0 +1,60 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ConversationModelSchema { + /// LLM service's account ID (only applicable for Cloudflare) + #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] + pub account_id: Option, + /// The LLM service's API Key + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, + /// Typesense collection that stores the historical conversations + #[serde(rename = "history_collection")] + pub history_collection: String, + /// An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. + #[serde(rename = "id")] + pub id: String, + /// The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. + #[serde(rename = "max_bytes")] + pub max_bytes: i32, + /// Name of the LLM model offered by OpenAI, Cloudflare or vLLM + #[serde(rename = "model_name")] + pub model_name: String, + /// The system prompt that contains special instructions to the LLM + #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] + pub system_prompt: Option, + /// Time interval in seconds after which the messages would be deleted. 
Default: 86400 (24 hours) + #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] + pub ttl: Option, + /// URL of vLLM service + #[serde(rename = "vllm_url", skip_serializing_if = "Option::is_none")] + pub vllm_url: Option, +} + +impl ConversationModelSchema { + pub fn new(history_collection: String, id: String, max_bytes: i32, model_name: String) -> ConversationModelSchema { + ConversationModelSchema { + account_id: None, + api_key: None, + history_collection, + id, + max_bytes, + model_name, + system_prompt: None, + ttl: None, + vllm_url: None, + } + } +} + diff --git a/typesense_codegen/src/models/conversation_model_update_schema.rs b/typesense_codegen/src/models/conversation_model_update_schema.rs new file mode 100644 index 0000000..e02b0ba --- /dev/null +++ b/typesense_codegen/src/models/conversation_model_update_schema.rs @@ -0,0 +1,60 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ConversationModelUpdateSchema { + /// LLM service's account ID (only applicable for Cloudflare) + #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] + pub account_id: Option, + /// The LLM service's API Key + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, + /// Typesense collection that stores the historical conversations + #[serde(rename = "history_collection", skip_serializing_if = "Option::is_none")] + pub history_collection: Option, + /// An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. + #[serde(rename = "id", skip_serializing_if = "Option::is_none")] + pub id: Option, + /// The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. + #[serde(rename = "max_bytes", skip_serializing_if = "Option::is_none")] + pub max_bytes: Option, + /// Name of the LLM model offered by OpenAI, Cloudflare or vLLM + #[serde(rename = "model_name", skip_serializing_if = "Option::is_none")] + pub model_name: Option, + /// The system prompt that contains special instructions to the LLM + #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] + pub system_prompt: Option, + /// Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) + #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] + pub ttl: Option, + /// URL of vLLM service + #[serde(rename = "vllm_url", skip_serializing_if = "Option::is_none")] + pub vllm_url: Option, +} + +impl ConversationModelUpdateSchema { + pub fn new() -> ConversationModelUpdateSchema { + ConversationModelUpdateSchema { + account_id: None, + api_key: None, + history_collection: None, + id: None, + max_bytes: None, + model_name: None, + system_prompt: None, + ttl: None, + vllm_url: None, + } + } +} + diff --git a/typesense_codegen/src/models/debug_200_response.rs b/typesense_codegen/src/models/debug_200_response.rs index 365fc42..3b1cf17 100644 --- a/typesense_codegen/src/models/debug_200_response.rs +++ b/typesense_codegen/src/models/debug_200_response.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. 
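// A sketch of the conversation model schemas above (not part of the diff): only
// history_collection, max_bytes and model_name are required at creation time. The model name
// and key value below are placeholders.
use typesense_codegen::models::ConversationModelCreateSchema;

fn assistant_model() -> ConversationModelCreateSchema {
    let mut model = ConversationModelCreateSchema::new(
        "conversation_store".to_string(),
        16384,
        "openai/gpt-4o-mini".to_string(),
    );
    model.api_key = Some("<LLM_API_KEY>".to_string());
    model.system_prompt = Some("You are a helpful product assistant.".to_string());
    model.ttl = Some(86400);
    model
}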
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct Debug200Response { #[serde(rename = "version", skip_serializing_if = "Option::is_none")] @@ -16,6 +19,9 @@ pub struct Debug200Response { impl Debug200Response { pub fn new() -> Debug200Response { - Debug200Response { version: None } + Debug200Response { + version: None, + } } } + diff --git a/typesense_codegen/src/models/delete_documents_200_response.rs b/typesense_codegen/src/models/delete_documents_200_response.rs index a509291..03e22e6 100644 --- a/typesense_codegen/src/models/delete_documents_200_response.rs +++ b/typesense_codegen/src/models/delete_documents_200_response.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct DeleteDocuments200Response { #[serde(rename = "num_deleted")] @@ -16,6 +19,9 @@ pub struct DeleteDocuments200Response { impl DeleteDocuments200Response { pub fn new(num_deleted: i32) -> DeleteDocuments200Response { - DeleteDocuments200Response { num_deleted } + DeleteDocuments200Response { + num_deleted, + } } } + diff --git a/typesense_codegen/src/models/delete_documents_delete_documents_parameters_parameter.rs b/typesense_codegen/src/models/delete_documents_delete_documents_parameters_parameter.rs deleted file mode 100644 index 722bdc4..0000000 --- a/typesense_codegen/src/models/delete_documents_delete_documents_parameters_parameter.rs +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. - * - * The version of the OpenAPI document: 0.25.0 - * - * Generated by: https://openapi-generator.tech - */ - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct DeleteDocumentsDeleteDocumentsParametersParameter { - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. - #[serde(rename = "batch_size", skip_serializing_if = "Option::is_none")] - pub batch_size: Option, -} - -impl DeleteDocumentsDeleteDocumentsParametersParameter { - pub fn new() -> DeleteDocumentsDeleteDocumentsParametersParameter { - DeleteDocumentsDeleteDocumentsParametersParameter { - filter_by: None, - batch_size: None, - } - } -} diff --git a/typesense_codegen/src/models/delete_stopwords_set_200_response.rs b/typesense_codegen/src/models/delete_stopwords_set_200_response.rs new file mode 100644 index 0000000..716b6e7 --- /dev/null +++ b/typesense_codegen/src/models/delete_stopwords_set_200_response.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct DeleteStopwordsSet200Response { + #[serde(rename = "id")] + pub id: String, +} + +impl DeleteStopwordsSet200Response { + pub fn new(id: String) -> DeleteStopwordsSet200Response { + DeleteStopwordsSet200Response { + id, + } + } +} + diff --git a/typesense_codegen/src/models/dirty_values.rs b/typesense_codegen/src/models/dirty_values.rs new file mode 100644 index 0000000..ab01406 --- /dev/null +++ b/typesense_codegen/src/models/dirty_values.rs @@ -0,0 +1,44 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum DirtyValues { + #[serde(rename = "coerce_or_reject")] + CoerceOrReject, + #[serde(rename = "coerce_or_drop")] + CoerceOrDrop, + #[serde(rename = "drop")] + Drop, + #[serde(rename = "reject")] + Reject, + +} + +impl std::fmt::Display for DirtyValues { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::CoerceOrReject => write!(f, "coerce_or_reject"), + Self::CoerceOrDrop => write!(f, "coerce_or_drop"), + Self::Drop => write!(f, "drop"), + Self::Reject => write!(f, "reject"), + } + } +} + +impl Default for DirtyValues { + fn default() -> DirtyValues { + Self::CoerceOrReject + } +} + diff --git a/typesense_codegen/src/models/document_index_parameters.rs b/typesense_codegen/src/models/document_index_parameters.rs new file mode 100644 index 0000000..339c8f3 --- /dev/null +++ b/typesense_codegen/src/models/document_index_parameters.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct DocumentIndexParameters { + #[serde(rename = "dirty_values", skip_serializing_if = "Option::is_none")] + pub dirty_values: Option, +} + +impl DocumentIndexParameters { + pub fn new() -> DocumentIndexParameters { + DocumentIndexParameters { + dirty_values: None, + } + } +} + diff --git a/typesense_codegen/src/models/drop_tokens_mode.rs b/typesense_codegen/src/models/drop_tokens_mode.rs new file mode 100644 index 0000000..79ced42 --- /dev/null +++ b/typesense_codegen/src/models/drop_tokens_mode.rs @@ -0,0 +1,42 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +/// DropTokensMode : Dictates the direction in which the words in the query must be dropped when the original words in the query do not appear in any document. Values: right_to_left (default), left_to_right, both_sides:3 A note on both_sides:3 - for queries upto 3 tokens (words) in length, this mode will drop tokens from both sides and exhaustively rank all matching results. 
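// A sketch of the new dirty-value handling above (not part of the diff): DocumentIndexParameters
// carries a DirtyValues mode, and the enum's Display impl renders the wire value.
use typesense_codegen::models::{DirtyValues, DocumentIndexParameters};

fn lenient_indexing() -> DocumentIndexParameters {
    let mut params = DocumentIndexParameters::new();
    params.dirty_values = Some(DirtyValues::CoerceOrDrop);
    assert_eq!(DirtyValues::CoerceOrDrop.to_string(), "coerce_or_drop");
    params
}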
If query length is greater than 3 words, Typesense will just fallback to default behavior of right_to_left +/// Dictates the direction in which the words in the query must be dropped when the original words in the query do not appear in any document. Values: right_to_left (default), left_to_right, both_sides:3 A note on both_sides:3 - for queries upto 3 tokens (words) in length, this mode will drop tokens from both sides and exhaustively rank all matching results. If query length is greater than 3 words, Typesense will just fallback to default behavior of right_to_left +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum DropTokensMode { + #[serde(rename = "right_to_left")] + RightToLeft, + #[serde(rename = "left_to_right")] + LeftToRight, + #[serde(rename = "both_sides:3")] + BothSidesColon3, + +} + +impl std::fmt::Display for DropTokensMode { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::RightToLeft => write!(f, "right_to_left"), + Self::LeftToRight => write!(f, "left_to_right"), + Self::BothSidesColon3 => write!(f, "both_sides:3"), + } + } +} + +impl Default for DropTokensMode { + fn default() -> DropTokensMode { + Self::RightToLeft + } +} + diff --git a/typesense_codegen/src/models/error_response.rs b/typesense_codegen/src/models/error_response.rs index ec2ef82..45c3487 100644 --- a/typesense_codegen/src/models/error_response.rs +++ b/typesense_codegen/src/models/error_response.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorResponse { #[serde(rename = "message", skip_serializing_if = "Option::is_none")] @@ -16,6 +19,9 @@ pub struct ErrorResponse { impl ErrorResponse { pub fn new() -> ErrorResponse { - ErrorResponse { message: None } + ErrorResponse { + message: None, + } } } + diff --git a/typesense_codegen/src/models/export_documents_export_documents_parameters_parameter.rs b/typesense_codegen/src/models/export_documents_export_documents_parameters_parameter.rs deleted file mode 100644 index c798028..0000000 --- a/typesense_codegen/src/models/export_documents_export_documents_parameters_parameter.rs +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. - * - * The version of the OpenAPI document: 0.25.0 - * - * Generated by: https://openapi-generator.tech - */ - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct ExportDocumentsExportDocumentsParametersParameter { - /// Filter conditions for refining your search results. Separate multiple conditions with &&. 
- #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// List of fields from the document to include in the search result - #[serde(rename = "include_fields")] - pub include_fields: String, - /// List of fields from the document to exclude in the search result - #[serde(rename = "exclude_fields")] - pub exclude_fields: String, -} - -impl ExportDocumentsExportDocumentsParametersParameter { - pub fn new( - include_fields: String, - exclude_fields: String, - ) -> ExportDocumentsExportDocumentsParametersParameter { - ExportDocumentsExportDocumentsParametersParameter { - filter_by: None, - include_fields, - exclude_fields, - } - } -} diff --git a/typesense_codegen/src/models/facet_counts.rs b/typesense_codegen/src/models/facet_counts.rs index 3e0a930..4735264 100644 --- a/typesense_codegen/src/models/facet_counts.rs +++ b/typesense_codegen/src/models/facet_counts.rs @@ -3,19 +3,22 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct FacetCounts { #[serde(rename = "counts", skip_serializing_if = "Option::is_none")] - pub counts: Option>, + pub counts: Option>, #[serde(rename = "field_name", skip_serializing_if = "Option::is_none")] pub field_name: Option, #[serde(rename = "stats", skip_serializing_if = "Option::is_none")] - pub stats: Option>, + pub stats: Option>, } impl FacetCounts { @@ -27,3 +30,4 @@ impl FacetCounts { } } } + diff --git a/typesense_codegen/src/models/facet_counts_counts_inner.rs b/typesense_codegen/src/models/facet_counts_counts_inner.rs index a4dca2e..fbc80d8 100644 --- a/typesense_codegen/src/models/facet_counts_counts_inner.rs +++ b/typesense_codegen/src/models/facet_counts_counts_inner.rs @@ -3,17 +3,22 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct FacetCountsCountsInner { #[serde(rename = "count", skip_serializing_if = "Option::is_none")] pub count: Option, #[serde(rename = "highlighted", skip_serializing_if = "Option::is_none")] pub highlighted: Option, + #[serde(rename = "parent", skip_serializing_if = "Option::is_none")] + pub parent: Option, #[serde(rename = "value", skip_serializing_if = "Option::is_none")] pub value: Option, } @@ -23,7 +28,9 @@ impl FacetCountsCountsInner { FacetCountsCountsInner { count: None, highlighted: None, + parent: None, value: None, } } } + diff --git a/typesense_codegen/src/models/facet_counts_stats.rs b/typesense_codegen/src/models/facet_counts_stats.rs index 142d2ce..f90642d 100644 --- a/typesense_codegen/src/models/facet_counts_stats.rs +++ b/typesense_codegen/src/models/facet_counts_stats.rs @@ -3,13 +3,18 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct FacetCountsStats { + #[serde(rename = "avg", skip_serializing_if = "Option::is_none")] + pub avg: Option, #[serde(rename = "max", skip_serializing_if = "Option::is_none")] pub max: Option, #[serde(rename = "min", skip_serializing_if = "Option::is_none")] @@ -18,18 +23,17 @@ pub struct FacetCountsStats { pub sum: Option, #[serde(rename = "total_values", skip_serializing_if = "Option::is_none")] pub total_values: Option, - #[serde(rename = "avg", skip_serializing_if = "Option::is_none")] - pub avg: Option, } impl FacetCountsStats { pub fn new() -> FacetCountsStats { FacetCountsStats { + avg: None, max: None, min: None, sum: None, total_values: None, - avg: None, } } } + diff --git a/typesense_codegen/src/models/field.rs b/typesense_codegen/src/models/field.rs index 454ae90..dcf32da 100644 --- a/typesense_codegen/src/models/field.rs +++ b/typesense_codegen/src/models/field.rs @@ -3,51 +3,87 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct Field { - #[serde(rename = "name")] - pub name: String, - #[serde(rename = "type")] - pub r#type: String, - #[serde(rename = "optional", skip_serializing_if = "Option::is_none")] - pub optional: Option, + #[serde(rename = "drop", skip_serializing_if = "Option::is_none")] + pub drop: Option, + #[serde(rename = "embed", skip_serializing_if = "Option::is_none")] + pub embed: Option>, #[serde(rename = "facet", skip_serializing_if = "Option::is_none")] pub facet: Option, #[serde(rename = "index", skip_serializing_if = "Option::is_none")] pub index: Option, - #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] - pub locale: Option, - #[serde(rename = "sort", skip_serializing_if = "Option::is_none")] - pub sort: Option, #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] pub infix: Option, + #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] + pub locale: Option, + #[serde(rename = "name")] + pub name: String, #[serde(rename = "num_dim", skip_serializing_if = "Option::is_none")] pub num_dim: Option, - #[serde(rename = "drop", skip_serializing_if = "Option::is_none")] - pub drop: Option, - #[serde(rename = "embed", skip_serializing_if = "Option::is_none")] - pub embed: Option>, + #[serde(rename = "optional", skip_serializing_if = "Option::is_none")] + pub optional: Option, + /// Enables an index optimized for range filtering on numerical fields (e.g. rating:>3.5). Default: false. + #[serde(rename = "range_index", skip_serializing_if = "Option::is_none")] + pub range_index: Option, + /// Name of a field in another collection that should be linked to this collection so that it can be joined during query. + #[serde(rename = "reference", skip_serializing_if = "Option::is_none")] + pub reference: Option, + #[serde(rename = "sort", skip_serializing_if = "Option::is_none")] + pub sort: Option, + /// Values are stemmed before indexing in-memory. Default: false. 
+ #[serde(rename = "stem", skip_serializing_if = "Option::is_none")] + pub stem: Option, + /// Name of the stemming dictionary to use for this field + #[serde(rename = "stem_dictionary", skip_serializing_if = "Option::is_none")] + pub stem_dictionary: Option, + /// When set to false, the field value will not be stored on disk. Default: true. + #[serde(rename = "store", skip_serializing_if = "Option::is_none")] + pub store: Option, + /// List of symbols or special characters to be indexed. + #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] + pub symbols_to_index: Option>, + /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. + #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] + pub token_separators: Option>, + #[serde(rename = "type")] + pub r#type: String, + /// The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. + #[serde(rename = "vec_dist", skip_serializing_if = "Option::is_none")] + pub vec_dist: Option, } impl Field { pub fn new(name: String, r#type: String) -> Field { Field { - name, - r#type, - optional: None, + drop: None, + embed: None, facet: None, index: None, - locale: None, - sort: None, infix: None, + locale: None, + name, num_dim: None, - drop: None, - embed: None, + optional: None, + range_index: None, + reference: None, + sort: None, + stem: None, + stem_dictionary: None, + store: None, + symbols_to_index: None, + token_separators: None, + r#type, + vec_dist: None, } } } + diff --git a/typesense_codegen/src/models/field_embed.rs b/typesense_codegen/src/models/field_embed.rs index fcb68e4..92c75cd 100644 --- a/typesense_codegen/src/models/field_embed.rs +++ b/typesense_codegen/src/models/field_embed.rs @@ -3,27 +3,28 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct FieldEmbed { #[serde(rename = "from")] pub from: Vec, #[serde(rename = "model_config")] - pub model_config: Box, + pub model_config: Box, } impl FieldEmbed { - pub fn new( - from: Vec, - model_config: crate::models::FieldEmbedModelConfig, - ) -> FieldEmbed { + pub fn new(from: Vec, model_config: models::FieldEmbedModelConfig) -> FieldEmbed { FieldEmbed { from, model_config: Box::new(model_config), } } } + diff --git a/typesense_codegen/src/models/field_embed_model_config.rs b/typesense_codegen/src/models/field_embed_model_config.rs index a7af536..91f9939 100644 --- a/typesense_codegen/src/models/field_embed_model_config.rs +++ b/typesense_codegen/src/models/field_embed_model_config.rs @@ -3,36 +3,52 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct FieldEmbedModelConfig { - #[serde(rename = "model_name")] - pub model_name: String, - #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] - pub api_key: Option, #[serde(rename = "access_token", skip_serializing_if = "Option::is_none")] pub access_token: Option, + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, #[serde(rename = "client_id", skip_serializing_if = "Option::is_none")] pub client_id: Option, #[serde(rename = "client_secret", skip_serializing_if = "Option::is_none")] pub client_secret: Option, + #[serde(rename = "indexing_prefix", skip_serializing_if = "Option::is_none")] + pub indexing_prefix: Option, + #[serde(rename = "model_name")] + pub model_name: String, #[serde(rename = "project_id", skip_serializing_if = "Option::is_none")] pub project_id: Option, + #[serde(rename = "query_prefix", skip_serializing_if = "Option::is_none")] + pub query_prefix: Option, + #[serde(rename = "refresh_token", skip_serializing_if = "Option::is_none")] + pub refresh_token: Option, + #[serde(rename = "url", skip_serializing_if = "Option::is_none")] + pub url: Option, } impl FieldEmbedModelConfig { pub fn new(model_name: String) -> FieldEmbedModelConfig { FieldEmbedModelConfig { - model_name, - api_key: None, access_token: None, + api_key: None, client_id: None, client_secret: None, + indexing_prefix: None, + model_name, project_id: None, + query_prefix: None, + refresh_token: None, + url: None, } } } + diff --git a/typesense_codegen/src/models/health_status.rs b/typesense_codegen/src/models/health_status.rs index b33a2ba..f0433ec 100644 --- a/typesense_codegen/src/models/health_status.rs +++ b/typesense_codegen/src/models/health_status.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct HealthStatus { #[serde(rename = "ok")] @@ -16,6 +19,9 @@ pub struct HealthStatus { impl HealthStatus { pub fn new(ok: bool) -> HealthStatus { - HealthStatus { ok } + HealthStatus { + ok, + } } } + diff --git a/typesense_codegen/src/models/import_documents_import_documents_parameters_parameter.rs b/typesense_codegen/src/models/import_documents_import_documents_parameters_parameter.rs deleted file mode 100644 index 9dd0edb..0000000 --- a/typesense_codegen/src/models/import_documents_import_documents_parameters_parameter.rs +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. 
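// A sketch of the auto-embedding wiring above (not part of the diff). It assumes the generator's
// usual Option<Box<_>> wrapping for Field::embed; the model name and source fields are placeholders.
use typesense_codegen::models::{Field, FieldEmbed, FieldEmbedModelConfig};

fn auto_embedding_field() -> Field {
    let model_config = FieldEmbedModelConfig::new("ts/all-MiniLM-L12-v2".to_string());
    let embed = FieldEmbed::new(
        vec!["title".to_string(), "description".to_string()],
        model_config,
    );

    let mut f = Field::new("embedding".to_string(), "float[]".to_string());
    f.embed = Some(Box::new(embed));
    f
}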
- * - * The version of the OpenAPI document: 0.25.0 - * - * Generated by: https://openapi-generator.tech - */ - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct ImportDocumentsImportDocumentsParametersParameter { - #[serde(rename = "action", skip_serializing_if = "Option::is_none")] - pub action: Option, - #[serde(rename = "batch_size", skip_serializing_if = "Option::is_none")] - pub batch_size: Option, - #[serde(rename = "dirty_values", skip_serializing_if = "Option::is_none")] - pub dirty_values: Option, - #[serde( - rename = "remote_embedding_batch_size", - skip_serializing_if = "Option::is_none" - )] - pub remote_embedding_batch_size: Option, -} - -impl ImportDocumentsImportDocumentsParametersParameter { - pub fn new() -> ImportDocumentsImportDocumentsParametersParameter { - ImportDocumentsImportDocumentsParametersParameter { - action: None, - batch_size: None, - dirty_values: None, - remote_embedding_batch_size: None, - } - } -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] -pub enum DirtyValues { - #[serde(rename = "coerce_or_reject")] - CoerceOrReject, - #[serde(rename = "coerce_or_drop")] - CoerceOrDrop, - #[serde(rename = "drop")] - Drop, - #[serde(rename = "reject")] - Reject, -} - -impl Default for DirtyValues { - fn default() -> DirtyValues { - Self::CoerceOrReject - } -} diff --git a/typesense_codegen/src/models/index_action.rs b/typesense_codegen/src/models/index_action.rs new file mode 100644 index 0000000..ab27f00 --- /dev/null +++ b/typesense_codegen/src/models/index_action.rs @@ -0,0 +1,44 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum IndexAction { + #[serde(rename = "create")] + Create, + #[serde(rename = "update")] + Update, + #[serde(rename = "upsert")] + Upsert, + #[serde(rename = "emplace")] + Emplace, + +} + +impl std::fmt::Display for IndexAction { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::Create => write!(f, "create"), + Self::Update => write!(f, "update"), + Self::Upsert => write!(f, "upsert"), + Self::Emplace => write!(f, "emplace"), + } + } +} + +impl Default for IndexAction { + fn default() -> IndexAction { + Self::Create + } +} + diff --git a/typesense_codegen/src/models/list_stemming_dictionaries_200_response.rs b/typesense_codegen/src/models/list_stemming_dictionaries_200_response.rs new file mode 100644 index 0000000..e1e3622 --- /dev/null +++ b/typesense_codegen/src/models/list_stemming_dictionaries_200_response.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
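// Illustrative sketch, not part of the generated diff: the document-import `action` is now the
// standalone IndexAction enum rather than a plain string on the deleted
// ImportDocumentsImportDocumentsParametersParameter model. Both serde and Display produce the
// lowercase wire values; serde_json is assumed to be available as a dependency.
use typesense_codegen::models::IndexAction;

fn main() {
    let action = IndexAction::Upsert;
    assert_eq!(action.to_string(), "upsert");                          // via the Display impl
    assert_eq!(serde_json::to_string(&action).unwrap(), "\"upsert\""); // via #[serde(rename = "upsert")]
    assert_eq!(IndexAction::default(), IndexAction::Create);           // `create` stays the default
}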
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ListStemmingDictionaries200Response { + #[serde(rename = "dictionaries", skip_serializing_if = "Option::is_none")] + pub dictionaries: Option>, +} + +impl ListStemmingDictionaries200Response { + pub fn new() -> ListStemmingDictionaries200Response { + ListStemmingDictionaries200Response { + dictionaries: None, + } + } +} + diff --git a/typesense_codegen/src/models/mod.rs b/typesense_codegen/src/models/mod.rs index f8fb452..876cc6d 100644 --- a/typesense_codegen/src/models/mod.rs +++ b/typesense_codegen/src/models/mod.rs @@ -1,21 +1,35 @@ +pub mod analytics_event_create_response; +pub use self::analytics_event_create_response::AnalyticsEventCreateResponse; +pub mod analytics_event_create_schema; +pub use self::analytics_event_create_schema::AnalyticsEventCreateSchema; +pub mod analytics_rule_delete_response; +pub use self::analytics_rule_delete_response::AnalyticsRuleDeleteResponse; pub mod analytics_rule_parameters; pub use self::analytics_rule_parameters::AnalyticsRuleParameters; pub mod analytics_rule_parameters_destination; pub use self::analytics_rule_parameters_destination::AnalyticsRuleParametersDestination; pub mod analytics_rule_parameters_source; pub use self::analytics_rule_parameters_source::AnalyticsRuleParametersSource; +pub mod analytics_rule_parameters_source_events_inner; +pub use self::analytics_rule_parameters_source_events_inner::AnalyticsRuleParametersSourceEventsInner; pub mod analytics_rule_schema; pub use self::analytics_rule_schema::AnalyticsRuleSchema; +pub mod analytics_rule_upsert_schema; +pub use self::analytics_rule_upsert_schema::AnalyticsRuleUpsertSchema; pub mod analytics_rules_retrieve_schema; pub use self::analytics_rules_retrieve_schema::AnalyticsRulesRetrieveSchema; pub mod api_key; pub use self::api_key::ApiKey; +pub mod api_key_delete_response; +pub use self::api_key_delete_response::ApiKeyDeleteResponse; pub mod api_key_schema; pub use self::api_key_schema::ApiKeySchema; pub mod api_keys_response; pub use self::api_keys_response::ApiKeysResponse; pub mod api_response; pub use self::api_response::ApiResponse; +pub mod api_stats_response; +pub use self::api_stats_response::ApiStatsResponse; pub mod collection_alias; pub use self::collection_alias::CollectionAlias; pub mod collection_alias_schema; @@ -28,16 +42,26 @@ pub mod collection_schema; pub use self::collection_schema::CollectionSchema; pub mod collection_update_schema; pub use self::collection_update_schema::CollectionUpdateSchema; +pub mod conversation_model_create_schema; +pub use self::conversation_model_create_schema::ConversationModelCreateSchema; +pub mod conversation_model_schema; +pub use self::conversation_model_schema::ConversationModelSchema; +pub mod conversation_model_update_schema; +pub use self::conversation_model_update_schema::ConversationModelUpdateSchema; pub mod debug_200_response; pub use self::debug_200_response::Debug200Response; pub mod delete_documents_200_response; pub use self::delete_documents_200_response::DeleteDocuments200Response; -pub mod delete_documents_delete_documents_parameters_parameter; -pub use self::delete_documents_delete_documents_parameters_parameter::DeleteDocumentsDeleteDocumentsParametersParameter; +pub mod delete_stopwords_set_200_response; +pub use 
self::delete_stopwords_set_200_response::DeleteStopwordsSet200Response; +pub mod dirty_values; +pub use self::dirty_values::DirtyValues; +pub mod document_index_parameters; +pub use self::document_index_parameters::DocumentIndexParameters; +pub mod drop_tokens_mode; +pub use self::drop_tokens_mode::DropTokensMode; pub mod error_response; pub use self::error_response::ErrorResponse; -pub mod export_documents_export_documents_parameters_parameter; -pub use self::export_documents_export_documents_parameters_parameter::ExportDocumentsExportDocumentsParametersParameter; pub mod facet_counts; pub use self::facet_counts::FacetCounts; pub mod facet_counts_counts_inner; @@ -52,16 +76,32 @@ pub mod field_embed_model_config; pub use self::field_embed_model_config::FieldEmbedModelConfig; pub mod health_status; pub use self::health_status::HealthStatus; -pub mod import_documents_import_documents_parameters_parameter; -pub use self::import_documents_import_documents_parameters_parameter::ImportDocumentsImportDocumentsParametersParameter; +pub mod index_action; +pub use self::index_action::IndexAction; +pub mod list_stemming_dictionaries_200_response; +pub use self::list_stemming_dictionaries_200_response::ListStemmingDictionaries200Response; pub mod multi_search_collection_parameters; pub use self::multi_search_collection_parameters::MultiSearchCollectionParameters; pub mod multi_search_parameters; pub use self::multi_search_parameters::MultiSearchParameters; pub mod multi_search_result; pub use self::multi_search_result::MultiSearchResult; +pub mod multi_search_result_item; +pub use self::multi_search_result_item::MultiSearchResultItem; pub mod multi_search_searches_parameter; pub use self::multi_search_searches_parameter::MultiSearchSearchesParameter; +pub mod preset_delete_schema; +pub use self::preset_delete_schema::PresetDeleteSchema; +pub mod preset_schema; +pub use self::preset_schema::PresetSchema; +pub mod preset_upsert_schema; +pub use self::preset_upsert_schema::PresetUpsertSchema; +pub mod preset_upsert_schema_value; +pub use self::preset_upsert_schema_value::PresetUpsertSchemaValue; +pub mod presets_retrieve_schema; +pub use self::presets_retrieve_schema::PresetsRetrieveSchema; +pub mod schema_change_status; +pub use self::schema_change_status::SchemaChangeStatus; pub mod scoped_key_parameters; pub use self::scoped_key_parameters::ScopedKeyParameters; pub mod search_grouped_hit; @@ -70,6 +110,8 @@ pub mod search_highlight; pub use self::search_highlight::SearchHighlight; pub mod search_override; pub use self::search_override::SearchOverride; +pub mod search_override_delete_response; +pub use self::search_override_delete_response::SearchOverrideDeleteResponse; pub mod search_override_exclude; pub use self::search_override_exclude::SearchOverrideExclude; pub mod search_override_include; @@ -84,21 +126,41 @@ pub mod search_parameters; pub use self::search_parameters::SearchParameters; pub mod search_result; pub use self::search_result::SearchResult; +pub mod search_result_conversation; +pub use self::search_result_conversation::SearchResultConversation; pub mod search_result_hit; pub use self::search_result_hit::SearchResultHit; +pub mod search_result_hit_text_match_info; +pub use self::search_result_hit_text_match_info::SearchResultHitTextMatchInfo; pub mod search_result_request_params; pub use self::search_result_request_params::SearchResultRequestParams; +pub mod search_result_request_params_voice_query; +pub use 
self::search_result_request_params_voice_query::SearchResultRequestParamsVoiceQuery; pub mod search_synonym; pub use self::search_synonym::SearchSynonym; +pub mod search_synonym_delete_response; +pub use self::search_synonym_delete_response::SearchSynonymDeleteResponse; pub mod search_synonym_schema; pub use self::search_synonym_schema::SearchSynonymSchema; pub mod search_synonyms_response; pub use self::search_synonyms_response::SearchSynonymsResponse; pub mod snapshot_parameters; pub use self::snapshot_parameters::SnapshotParameters; +pub mod stemming_dictionary; +pub use self::stemming_dictionary::StemmingDictionary; +pub mod stemming_dictionary_words_inner; +pub use self::stemming_dictionary_words_inner::StemmingDictionaryWordsInner; +pub mod stopwords_set_retrieve_schema; +pub use self::stopwords_set_retrieve_schema::StopwordsSetRetrieveSchema; +pub mod stopwords_set_schema; +pub use self::stopwords_set_schema::StopwordsSetSchema; +pub mod stopwords_set_upsert_schema; +pub use self::stopwords_set_upsert_schema::StopwordsSetUpsertSchema; +pub mod stopwords_sets_retrieve_all_schema; +pub use self::stopwords_sets_retrieve_all_schema::StopwordsSetsRetrieveAllSchema; pub mod success_status; pub use self::success_status::SuccessStatus; pub mod update_documents_200_response; pub use self::update_documents_200_response::UpdateDocuments200Response; -pub mod update_documents_update_documents_parameters_parameter; -pub use self::update_documents_update_documents_parameters_parameter::UpdateDocumentsUpdateDocumentsParametersParameter; +pub mod voice_query_model_collection_config; +pub use self::voice_query_model_collection_config::VoiceQueryModelCollectionConfig; diff --git a/typesense_codegen/src/models/multi_search_collection_parameters.rs b/typesense_codegen/src/models/multi_search_collection_parameters.rs index 97a99fd..ad18f80 100644 --- a/typesense_codegen/src/models/multi_search_collection_parameters.rs +++ b/typesense_codegen/src/models/multi_search_collection_parameters.rs @@ -3,229 +3,285 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct MultiSearchCollectionParameters { - /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - #[serde(rename = "q", skip_serializing_if = "Option::is_none")] - pub q: Option, - /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] - pub query_by: Option, - /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. - #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] - pub query_by_weights: Option, - /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. 
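// Illustrative sketch, not part of the generated diff: with the flattened `pub mod` / `pub use`
// pairs above, every schema added in this patch is importable straight from `models`. The names
// below are taken from the re-export list; type_name() is used only to keep the snippet compilable.
use typesense_codegen::models::{DirtyValues, DocumentIndexParameters, StemmingDictionary};

fn re_exported_type_names() -> [&'static str; 3] {
    [
        std::any::type_name::<DirtyValues>(),
        std::any::type_name::<DocumentIndexParameters>(),
        std::any::type_name::<StemmingDictionary>(),
    ]
}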
- #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] - pub text_match_type: Option, - /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, + /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] + pub cache_ttl: Option, + /// Enable conversational search. + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option, + /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] + pub conversation_id: Option, + /// The Id of Conversation Model to be used. + #[serde(rename = "conversation_model_id", skip_serializing_if = "Option::is_none")] + pub conversation_model_id: Option, + #[serde(rename = "drop_tokens_mode", skip_serializing_if = "Option::is_none")] + pub drop_tokens_mode: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + #[serde(rename = "drop_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub drop_tokens_threshold: Option, + /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] + pub enable_overrides: Option, + /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + #[serde(rename = "enable_synonyms", skip_serializing_if = "Option::is_none")] + pub enable_synonyms: Option, + /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + #[serde(rename = "enable_typos_for_alpha_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_alpha_numerical_tokens: Option, + /// Make Typesense disable typos for numerical tokens. + #[serde(rename = "enable_typos_for_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_numerical_tokens: Option, + /// List of fields from the document to exclude in the search result + #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] + pub exclude_fields: Option, + /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] + pub exhaustive_search: Option, + /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] + pub facet_by: Option, + /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. 
For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". + #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] + pub facet_query: Option, + /// Comma separated string of nested facet fields whose parent object should be returned in facet response. + #[serde(rename = "facet_return_parent", skip_serializing_if = "Option::is_none")] + pub facet_return_parent: Option, + /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] + pub facet_strategy: Option, + /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, + /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] + pub group_by: Option, + /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] + pub group_limit: Option, + /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + #[serde(rename = "group_missing_values", skip_serializing_if = "Option::is_none")] + pub group_missing_values: Option, + /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] + pub hidden_hits: Option, + /// The number of tokens that should surround the highlighted text on each side. Default: 4 + #[serde(rename = "highlight_affix_num_tokens", skip_serializing_if = "Option::is_none")] + pub highlight_affix_num_tokens: Option, + /// The end tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] + pub highlight_end_tag: Option, + /// A list of custom fields that must be highlighted even if you don't query for them + #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] + pub highlight_fields: Option, + /// List of fields which should be highlighted fully without snippeting + #[serde(rename = "highlight_full_fields", skip_serializing_if = "Option::is_none")] + pub highlight_full_fields: Option, + /// The start tag used for the highlighted snippets. 
Default: `` + #[serde(rename = "highlight_start_tag", skip_serializing_if = "Option::is_none")] + pub highlight_start_tag: Option, + /// List of fields from the document to include in the search result + #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] + pub include_fields: Option, /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] pub infix: Option, + /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] + pub limit: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_prefix", skip_serializing_if = "Option::is_none")] pub max_extra_prefix: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_suffix", skip_serializing_if = "Option::is_none")] pub max_extra_suffix: Option, - /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` - #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, - /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] - pub facet_by: Option, /// Maximum number of facet values to be returned. #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] pub max_facet_values: Option, - /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". - #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] - pub facet_query: Option, - /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + /// Minimum word length for 1-typo correction to be applied. 
The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] + pub min_len_1typo: Option, + /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] + pub min_len_2typo: Option, + /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 #[serde(rename = "num_typos", skip_serializing_if = "Option::is_none")] pub num_typos: Option, + /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] + pub offset: Option, + /// Comma separated list of tags to trigger the curations rules that match the tags. + #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] + pub override_tags: Option, /// Results from this specific page number would be fetched. #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, /// Number of results to fetch per page. Default: 10 #[serde(rename = "per_page", skip_serializing_if = "Option::is_none")] pub per_page: Option, - /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] - pub offset: Option, - /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. - #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] - pub group_by: Option, - /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 - #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] - pub group_limit: Option, - /// List of fields from the document to include in the search result - #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, - /// List of fields from the document to exclude in the search result - #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, - /// List of fields which should be highlighted fully without snippeting - #[serde( - rename = "highlight_full_fields", - skip_serializing_if = "Option::is_none" - )] - pub highlight_full_fields: Option, - /// The number of tokens that should surround the highlighted text on each side. Default: 4 - #[serde( - rename = "highlight_affix_num_tokens", - skip_serializing_if = "Option::is_none" - )] - pub highlight_affix_num_tokens: Option, - /// The start tag used for the highlighted snippets. Default: `` - #[serde( - rename = "highlight_start_tag", - skip_serializing_if = "Option::is_none" - )] - pub highlight_start_tag: Option, - /// The end tag used for the highlighted snippets. 
Default: `` - #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] - pub highlight_end_tag: Option, - /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] - pub snippet_threshold: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - #[serde( - rename = "drop_tokens_threshold", - skip_serializing_if = "Option::is_none" - )] - pub drop_tokens_threshold: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - #[serde( - rename = "typo_tokens_threshold", - skip_serializing_if = "Option::is_none" - )] - pub typo_tokens_threshold: Option, - /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "pinned_hits", skip_serializing_if = "Option::is_none")] pub pinned_hits: Option, - /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] - pub hidden_hits: Option, - /// A list of custom fields that must be highlighted even if you don't query for them - #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] - pub highlight_fields: Option, - /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same - #[serde( - rename = "pre_segmented_query", - skip_serializing_if = "Option::is_none" - )] + /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. 
Set this parameter to true to do the same + #[serde(rename = "pre_segmented_query", skip_serializing_if = "Option::is_none")] pub pre_segmented_query: Option, - /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] + pub prefix: Option, + /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. #[serde(rename = "preset", skip_serializing_if = "Option::is_none")] pub preset: Option, - /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] - pub enable_overrides: Option, - /// Set this parameter to true to ensure that an exact match is ranked above the others - #[serde( - rename = "prioritize_exact_match", - skip_serializing_if = "Option::is_none" - )] + /// Set this parameter to true to ensure that an exact match is ranked above the others + #[serde(rename = "prioritize_exact_match", skip_serializing_if = "Option::is_none")] pub prioritize_exact_match: Option, - /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] - pub exhaustive_search: Option, - /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. + /// Make Typesense prioritize documents where the query words appear in more number of fields. + #[serde(rename = "prioritize_num_matching_fields", skip_serializing_if = "Option::is_none")] + pub prioritize_num_matching_fields: Option, + /// Make Typesense prioritize documents where the query words appear earlier in the text. + #[serde(rename = "prioritize_token_position", skip_serializing_if = "Option::is_none")] + pub prioritize_token_position: Option, + /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + #[serde(rename = "q", skip_serializing_if = "Option::is_none")] + pub q: Option, + /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] + pub query_by: Option, + /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] + pub query_by_weights: Option, + /// Number of times to retry fetching remote embeddings. + #[serde(rename = "remote_embedding_num_tries", skip_serializing_if = "Option::is_none")] + pub remote_embedding_num_tries: Option, + /// Timeout (in milliseconds) for fetching remote embeddings. 
+ #[serde(rename = "remote_embedding_timeout_ms", skip_serializing_if = "Option::is_none")] + pub remote_embedding_timeout_ms: Option, + /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. #[serde(rename = "search_cutoff_ms", skip_serializing_if = "Option::is_none")] pub search_cutoff_ms: Option, - /// Enable server side caching of search query results. By default, caching is disabled. + /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] + pub snippet_threshold: Option, + /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] + pub sort_by: Option, + /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] + pub stopwords: Option, + /// Allow synonym resolution on typo-corrected words in the query. Default: 0 + #[serde(rename = "synonym_num_typos", skip_serializing_if = "Option::is_none")] + pub synonym_num_typos: Option, + /// Allow synonym resolution on word prefixes in the query. Default: false + #[serde(rename = "synonym_prefix", skip_serializing_if = "Option::is_none")] + pub synonym_prefix: Option, + /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] + pub text_match_type: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + #[serde(rename = "typo_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub typo_tokens_threshold: Option, + /// Enable server side caching of search query results. By default, caching is disabled. #[serde(rename = "use_cache", skip_serializing_if = "Option::is_none")] pub use_cache: Option, - /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] - pub cache_ttl: Option, - /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] - pub min_len_1typo: Option, - /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] - pub min_len_2typo: Option, - /// Vector query expression for fetching documents \"closest\" to a given query/document vector. + /// Vector query expression for fetching documents \"closest\" to a given query/document vector. 
#[serde(rename = "vector_query", skip_serializing_if = "Option::is_none")] pub vector_query: Option, - /// Timeout (in milliseconds) for fetching remote embeddings. - #[serde( - rename = "remote_embedding_timeout_ms", - skip_serializing_if = "Option::is_none" - )] - pub remote_embedding_timeout_ms: Option, - /// Number of times to retry fetching remote embeddings. - #[serde( - rename = "remote_embedding_num_tries", - skip_serializing_if = "Option::is_none" - )] - pub remote_embedding_num_tries: Option, - /// The collection to search in. - #[serde(rename = "collection")] - pub collection: String, + /// The base64 encoded audio file in 16 khz 16-bit WAV format. + #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] + pub voice_query: Option, + /// The collection to search in. + #[serde(rename = "collection", skip_serializing_if = "Option::is_none")] + pub collection: Option, + /// When true, computes both text match and vector distance scores for all matches in hybrid search. Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. + #[serde(rename = "rerank_hybrid_matches", skip_serializing_if = "Option::is_none")] + pub rerank_hybrid_matches: Option, + /// A separate search API key for each search within a multi_search request + #[serde(rename = "x-typesense-api-key", skip_serializing_if = "Option::is_none")] + pub x_typesense_api_key: Option, } impl MultiSearchCollectionParameters { - pub fn new(collection: String) -> MultiSearchCollectionParameters { + pub fn new() -> MultiSearchCollectionParameters { MultiSearchCollectionParameters { - q: None, - query_by: None, - query_by_weights: None, - text_match_type: None, - prefix: None, + cache_ttl: None, + conversation: None, + conversation_id: None, + conversation_model_id: None, + drop_tokens_mode: None, + drop_tokens_threshold: None, + enable_overrides: None, + enable_synonyms: None, + enable_typos_for_alpha_numerical_tokens: None, + enable_typos_for_numerical_tokens: None, + exclude_fields: None, + exhaustive_search: None, + facet_by: None, + facet_query: None, + facet_return_parent: None, + facet_strategy: None, + filter_by: None, + filter_curated_hits: None, + group_by: None, + group_limit: None, + group_missing_values: None, + hidden_hits: None, + highlight_affix_num_tokens: None, + highlight_end_tag: None, + highlight_fields: None, + highlight_full_fields: None, + highlight_start_tag: None, + include_fields: None, infix: None, + limit: None, max_extra_prefix: None, max_extra_suffix: None, - filter_by: None, - sort_by: None, - facet_by: None, max_facet_values: None, - facet_query: None, + min_len_1typo: None, + min_len_2typo: None, num_typos: None, + offset: None, + override_tags: None, page: None, per_page: None, - limit: None, - offset: None, - group_by: None, - group_limit: None, - include_fields: None, - exclude_fields: None, - highlight_full_fields: None, - highlight_affix_num_tokens: None, - highlight_start_tag: None, - highlight_end_tag: None, - snippet_threshold: None, - drop_tokens_threshold: None, - typo_tokens_threshold: None, pinned_hits: None, - hidden_hits: None, - highlight_fields: None, pre_segmented_query: None, + prefix: None, preset: None, - enable_overrides: None, prioritize_exact_match: None, - exhaustive_search: None, + prioritize_num_matching_fields: None, + prioritize_token_position: None, + q: None, + query_by: None, + query_by_weights: None, + remote_embedding_num_tries: None, + 
remote_embedding_timeout_ms: None, search_cutoff_ms: None, + snippet_threshold: None, + sort_by: None, + stopwords: None, + synonym_num_typos: None, + synonym_prefix: None, + text_match_type: None, + typo_tokens_threshold: None, use_cache: None, - cache_ttl: None, - min_len_1typo: None, - min_len_2typo: None, vector_query: None, - remote_embedding_timeout_ms: None, - remote_embedding_num_tries: None, - collection, + voice_query: None, + collection: None, + rerank_hybrid_matches: None, + x_typesense_api_key: None, } } } + diff --git a/typesense_codegen/src/models/multi_search_parameters.rs b/typesense_codegen/src/models/multi_search_parameters.rs index 409cc90..6de9b21 100644 --- a/typesense_codegen/src/models/multi_search_parameters.rs +++ b/typesense_codegen/src/models/multi_search_parameters.rs @@ -3,228 +3,275 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ -/// MultiSearchParameters : Parameters for the multi search API. +use crate::models; +use serde::{Deserialize, Serialize}; +/// MultiSearchParameters : Parameters for the multi search API. #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct MultiSearchParameters { - /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - #[serde(rename = "q", skip_serializing_if = "Option::is_none")] - pub q: Option, - /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] - pub query_by: Option, - /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. - #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] - pub query_by_weights: Option, - /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. - #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] - pub text_match_type: Option, - /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, + /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] + pub cache_ttl: Option, + /// Enable conversational search. + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option, + /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] + pub conversation_id: Option, + /// The Id of Conversation Model to be used. 
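// Illustrative sketch, not part of the generated diff: MultiSearchCollectionParameters::new() no
// longer takes `collection` (the field is now optional, e.g. when it comes from a preset), and
// each search can carry its own key via `x-typesense-api-key`. Option<String> is assumed for the
// fields shown; the key value is a placeholder.
use typesense_codegen::models::MultiSearchCollectionParameters;

fn products_search() -> MultiSearchCollectionParameters {
    MultiSearchCollectionParameters {
        collection: Some("products".to_string()),
        q: Some("phone".to_string()),
        query_by: Some("name,description".to_string()),
        x_typesense_api_key: Some("per-search-scoped-key".to_string()), // placeholder API key
        ..MultiSearchCollectionParameters::new() // every other parameter stays None
    }
}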
+ #[serde(rename = "conversation_model_id", skip_serializing_if = "Option::is_none")] + pub conversation_model_id: Option, + #[serde(rename = "drop_tokens_mode", skip_serializing_if = "Option::is_none")] + pub drop_tokens_mode: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + #[serde(rename = "drop_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub drop_tokens_threshold: Option, + /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] + pub enable_overrides: Option, + /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + #[serde(rename = "enable_synonyms", skip_serializing_if = "Option::is_none")] + pub enable_synonyms: Option, + /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + #[serde(rename = "enable_typos_for_alpha_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_alpha_numerical_tokens: Option, + /// Make Typesense disable typos for numerical tokens. + #[serde(rename = "enable_typos_for_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_numerical_tokens: Option, + /// List of fields from the document to exclude in the search result + #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] + pub exclude_fields: Option, + /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] + pub exhaustive_search: Option, + /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] + pub facet_by: Option, + /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". + #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] + pub facet_query: Option, + /// Comma separated string of nested facet fields whose parent object should be returned in facet response. + #[serde(rename = "facet_return_parent", skip_serializing_if = "Option::is_none")] + pub facet_return_parent: Option, + /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] + pub facet_strategy: Option, + /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. 
+ #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, + /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] + pub group_by: Option, + /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] + pub group_limit: Option, + /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + #[serde(rename = "group_missing_values", skip_serializing_if = "Option::is_none")] + pub group_missing_values: Option, + /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] + pub hidden_hits: Option, + /// The number of tokens that should surround the highlighted text on each side. Default: 4 + #[serde(rename = "highlight_affix_num_tokens", skip_serializing_if = "Option::is_none")] + pub highlight_affix_num_tokens: Option, + /// The end tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] + pub highlight_end_tag: Option, + /// A list of custom fields that must be highlighted even if you don't query for them + #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] + pub highlight_fields: Option, + /// List of fields which should be highlighted fully without snippeting + #[serde(rename = "highlight_full_fields", skip_serializing_if = "Option::is_none")] + pub highlight_full_fields: Option, + /// The start tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_start_tag", skip_serializing_if = "Option::is_none")] + pub highlight_start_tag: Option, + /// List of fields from the document to include in the search result + #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] + pub include_fields: Option, /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] pub infix: Option, + /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. 
Default: 10. + #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] + pub limit: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_prefix", skip_serializing_if = "Option::is_none")] pub max_extra_prefix: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_suffix", skip_serializing_if = "Option::is_none")] pub max_extra_suffix: Option, - /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` - #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, - /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] - pub facet_by: Option, /// Maximum number of facet values to be returned. #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] pub max_facet_values: Option, - /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". - #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] - pub facet_query: Option, - /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] + pub min_len_1typo: Option, + /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] + pub min_len_2typo: Option, + /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 #[serde(rename = "num_typos", skip_serializing_if = "Option::is_none")] pub num_typos: Option, + /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] + pub offset: Option, + /// Comma separated list of tags to trigger the curations rules that match the tags. 
+ #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] + pub override_tags: Option, /// Results from this specific page number would be fetched. #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, /// Number of results to fetch per page. Default: 10 #[serde(rename = "per_page", skip_serializing_if = "Option::is_none")] pub per_page: Option, - /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] - pub offset: Option, - /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. - #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] - pub group_by: Option, - /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 - #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] - pub group_limit: Option, - /// List of fields from the document to include in the search result - #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, - /// List of fields from the document to exclude in the search result - #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, - /// List of fields which should be highlighted fully without snippeting - #[serde( - rename = "highlight_full_fields", - skip_serializing_if = "Option::is_none" - )] - pub highlight_full_fields: Option, - /// The number of tokens that should surround the highlighted text on each side. Default: 4 - #[serde( - rename = "highlight_affix_num_tokens", - skip_serializing_if = "Option::is_none" - )] - pub highlight_affix_num_tokens: Option, - /// The start tag used for the highlighted snippets. Default: `` - #[serde( - rename = "highlight_start_tag", - skip_serializing_if = "Option::is_none" - )] - pub highlight_start_tag: Option, - /// The end tag used for the highlighted snippets. Default: `` - #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] - pub highlight_end_tag: Option, - /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] - pub snippet_threshold: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - #[serde( - rename = "drop_tokens_threshold", - skip_serializing_if = "Option::is_none" - )] - pub drop_tokens_threshold: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. 
Default: 100 - #[serde( - rename = "typo_tokens_threshold", - skip_serializing_if = "Option::is_none" - )] - pub typo_tokens_threshold: Option, - /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "pinned_hits", skip_serializing_if = "Option::is_none")] pub pinned_hits: Option, - /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] - pub hidden_hits: Option, - /// A list of custom fields that must be highlighted even if you don't query for them - #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] - pub highlight_fields: Option, - /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same - #[serde( - rename = "pre_segmented_query", - skip_serializing_if = "Option::is_none" - )] + /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same + #[serde(rename = "pre_segmented_query", skip_serializing_if = "Option::is_none")] pub pre_segmented_query: Option, - /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] + pub prefix: Option, + /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. 
#[serde(rename = "preset", skip_serializing_if = "Option::is_none")] pub preset: Option, - /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] - pub enable_overrides: Option, - /// Set this parameter to true to ensure that an exact match is ranked above the others - #[serde( - rename = "prioritize_exact_match", - skip_serializing_if = "Option::is_none" - )] + /// Set this parameter to true to ensure that an exact match is ranked above the others + #[serde(rename = "prioritize_exact_match", skip_serializing_if = "Option::is_none")] pub prioritize_exact_match: Option, - /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] - pub exhaustive_search: Option, - /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. + /// Make Typesense prioritize documents where the query words appear in more number of fields. + #[serde(rename = "prioritize_num_matching_fields", skip_serializing_if = "Option::is_none")] + pub prioritize_num_matching_fields: Option, + /// Make Typesense prioritize documents where the query words appear earlier in the text. + #[serde(rename = "prioritize_token_position", skip_serializing_if = "Option::is_none")] + pub prioritize_token_position: Option, + /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + #[serde(rename = "q", skip_serializing_if = "Option::is_none")] + pub q: Option, + /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] + pub query_by: Option, + /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] + pub query_by_weights: Option, + /// Number of times to retry fetching remote embeddings. + #[serde(rename = "remote_embedding_num_tries", skip_serializing_if = "Option::is_none")] + pub remote_embedding_num_tries: Option, + /// Timeout (in milliseconds) for fetching remote embeddings. + #[serde(rename = "remote_embedding_timeout_ms", skip_serializing_if = "Option::is_none")] + pub remote_embedding_timeout_ms: Option, + /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. #[serde(rename = "search_cutoff_ms", skip_serializing_if = "Option::is_none")] pub search_cutoff_ms: Option, - /// Enable server side caching of search query results. By default, caching is disabled. + /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. 
Default: 30 + #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] + pub snippet_threshold: Option, + /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] + pub sort_by: Option, + /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] + pub stopwords: Option, + /// Allow synonym resolution on typo-corrected words in the query. Default: 0 + #[serde(rename = "synonym_num_typos", skip_serializing_if = "Option::is_none")] + pub synonym_num_typos: Option, + /// Allow synonym resolution on word prefixes in the query. Default: false + #[serde(rename = "synonym_prefix", skip_serializing_if = "Option::is_none")] + pub synonym_prefix: Option, + /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] + pub text_match_type: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + #[serde(rename = "typo_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub typo_tokens_threshold: Option, + /// Enable server side caching of search query results. By default, caching is disabled. #[serde(rename = "use_cache", skip_serializing_if = "Option::is_none")] pub use_cache: Option, - /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] - pub cache_ttl: Option, - /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] - pub min_len_1typo: Option, - /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] - pub min_len_2typo: Option, - /// Vector query expression for fetching documents \"closest\" to a given query/document vector. + /// Vector query expression for fetching documents \"closest\" to a given query/document vector. #[serde(rename = "vector_query", skip_serializing_if = "Option::is_none")] pub vector_query: Option, - /// Timeout (in milliseconds) for fetching remote embeddings. - #[serde( - rename = "remote_embedding_timeout_ms", - skip_serializing_if = "Option::is_none" - )] - pub remote_embedding_timeout_ms: Option, - /// Number of times to retry fetching remote embeddings. - #[serde( - rename = "remote_embedding_num_tries", - skip_serializing_if = "Option::is_none" - )] - pub remote_embedding_num_tries: Option, + /// The base64 encoded audio file in 16 khz 16-bit WAV format. 
+ #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] + pub voice_query: Option, } impl MultiSearchParameters { - /// Parameters for the multi search API. + /// Parameters for the multi search API. pub fn new() -> MultiSearchParameters { MultiSearchParameters { - q: None, - query_by: None, - query_by_weights: None, - text_match_type: None, - prefix: None, + cache_ttl: None, + conversation: None, + conversation_id: None, + conversation_model_id: None, + drop_tokens_mode: None, + drop_tokens_threshold: None, + enable_overrides: None, + enable_synonyms: None, + enable_typos_for_alpha_numerical_tokens: None, + enable_typos_for_numerical_tokens: None, + exclude_fields: None, + exhaustive_search: None, + facet_by: None, + facet_query: None, + facet_return_parent: None, + facet_strategy: None, + filter_by: None, + filter_curated_hits: None, + group_by: None, + group_limit: None, + group_missing_values: None, + hidden_hits: None, + highlight_affix_num_tokens: None, + highlight_end_tag: None, + highlight_fields: None, + highlight_full_fields: None, + highlight_start_tag: None, + include_fields: None, infix: None, + limit: None, max_extra_prefix: None, max_extra_suffix: None, - filter_by: None, - sort_by: None, - facet_by: None, max_facet_values: None, - facet_query: None, + min_len_1typo: None, + min_len_2typo: None, num_typos: None, + offset: None, + override_tags: None, page: None, per_page: None, - limit: None, - offset: None, - group_by: None, - group_limit: None, - include_fields: None, - exclude_fields: None, - highlight_full_fields: None, - highlight_affix_num_tokens: None, - highlight_start_tag: None, - highlight_end_tag: None, - snippet_threshold: None, - drop_tokens_threshold: None, - typo_tokens_threshold: None, pinned_hits: None, - hidden_hits: None, - highlight_fields: None, pre_segmented_query: None, + prefix: None, preset: None, - enable_overrides: None, prioritize_exact_match: None, - exhaustive_search: None, + prioritize_num_matching_fields: None, + prioritize_token_position: None, + q: None, + query_by: None, + query_by_weights: None, + remote_embedding_num_tries: None, + remote_embedding_timeout_ms: None, search_cutoff_ms: None, + snippet_threshold: None, + sort_by: None, + stopwords: None, + synonym_num_typos: None, + synonym_prefix: None, + text_match_type: None, + typo_tokens_threshold: None, use_cache: None, - cache_ttl: None, - min_len_1typo: None, - min_len_2typo: None, vector_query: None, - remote_embedding_timeout_ms: None, - remote_embedding_num_tries: None, + voice_query: None, } } } + diff --git a/typesense_codegen/src/models/multi_search_result.rs b/typesense_codegen/src/models/multi_search_result.rs index 1bab924..f1cc224 100644 --- a/typesense_codegen/src/models/multi_search_result.rs +++ b/typesense_codegen/src/models/multi_search_result.rs @@ -3,19 +3,28 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct MultiSearchResult { +pub struct MultiSearchResult { + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option>, #[serde(rename = "results")] - pub results: Vec>, + pub results: Vec, } -impl MultiSearchResult { - pub fn new(results: Vec>) -> Self { - Self { results } +impl MultiSearchResult { + pub fn new(results: Vec) -> MultiSearchResult { + MultiSearchResult { + conversation: None, + results, + } } } + diff --git a/typesense_codegen/src/models/multi_search_result_item.rs b/typesense_codegen/src/models/multi_search_result_item.rs new file mode 100644 index 0000000..8738365 --- /dev/null +++ b/typesense_codegen/src/models/multi_search_result_item.rs @@ -0,0 +1,71 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct MultiSearchResultItem { + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option>, + #[serde(rename = "facet_counts", skip_serializing_if = "Option::is_none")] + pub facet_counts: Option>, + /// The number of documents found + #[serde(rename = "found", skip_serializing_if = "Option::is_none")] + pub found: Option, + #[serde(rename = "found_docs", skip_serializing_if = "Option::is_none")] + pub found_docs: Option, + #[serde(rename = "grouped_hits", skip_serializing_if = "Option::is_none")] + pub grouped_hits: Option>, + /// The documents that matched the search query + #[serde(rename = "hits", skip_serializing_if = "Option::is_none")] + pub hits: Option>, + /// The total number of documents in the collection + #[serde(rename = "out_of", skip_serializing_if = "Option::is_none")] + pub out_of: Option, + /// The search result page number + #[serde(rename = "page", skip_serializing_if = "Option::is_none")] + pub page: Option, + #[serde(rename = "request_params", skip_serializing_if = "Option::is_none")] + pub request_params: Option>, + /// Whether the search was cut off + #[serde(rename = "search_cutoff", skip_serializing_if = "Option::is_none")] + pub search_cutoff: Option, + /// The number of milliseconds the search took + #[serde(rename = "search_time_ms", skip_serializing_if = "Option::is_none")] + pub search_time_ms: Option, + /// HTTP error code + #[serde(rename = "code", skip_serializing_if = "Option::is_none")] + pub code: Option, + /// Error description + #[serde(rename = "error", skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +impl MultiSearchResultItem { + pub fn new() -> MultiSearchResultItem { + MultiSearchResultItem { + conversation: None, + facet_counts: None, + found: None, + found_docs: None, + grouped_hits: None, + hits: None, + out_of: None, + page: None, + request_params: None, + search_cutoff: None, + search_time_ms: None, + code: None, + error: None, + } + } +} + diff --git a/typesense_codegen/src/models/multi_search_searches_parameter.rs b/typesense_codegen/src/models/multi_search_searches_parameter.rs index 1e11b6e..3c3fff2 100644 --- 
a/typesense_codegen/src/models/multi_search_searches_parameter.rs +++ b/typesense_codegen/src/models/multi_search_searches_parameter.rs @@ -3,21 +3,29 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct MultiSearchSearchesParameter { #[serde(rename = "searches")] - pub searches: Vec, + pub searches: Vec, + /// When true, merges the search results from each search query into a single ordered set of hits. + #[serde(rename = "union", skip_serializing_if = "Option::is_none")] + pub union: Option, } impl MultiSearchSearchesParameter { - pub fn new( - searches: Vec, - ) -> MultiSearchSearchesParameter { - MultiSearchSearchesParameter { searches } + pub fn new(searches: Vec) -> MultiSearchSearchesParameter { + MultiSearchSearchesParameter { + searches, + union: None, + } } } + diff --git a/typesense_codegen/src/models/preset_delete_schema.rs b/typesense_codegen/src/models/preset_delete_schema.rs new file mode 100644 index 0000000..ea52d06 --- /dev/null +++ b/typesense_codegen/src/models/preset_delete_schema.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct PresetDeleteSchema { + #[serde(rename = "name")] + pub name: String, +} + +impl PresetDeleteSchema { + pub fn new(name: String) -> PresetDeleteSchema { + PresetDeleteSchema { + name, + } + } +} + diff --git a/typesense_codegen/src/models/preset_schema.rs b/typesense_codegen/src/models/preset_schema.rs new file mode 100644 index 0000000..7987eb1 --- /dev/null +++ b/typesense_codegen/src/models/preset_schema.rs @@ -0,0 +1,30 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct PresetSchema { + #[serde(rename = "value")] + pub value: Box, + #[serde(rename = "name")] + pub name: String, +} + +impl PresetSchema { + pub fn new(value: models::PresetUpsertSchemaValue, name: String) -> PresetSchema { + PresetSchema { + value: Box::new(value), + name, + } + } +} + diff --git a/typesense_codegen/src/models/preset_upsert_schema.rs b/typesense_codegen/src/models/preset_upsert_schema.rs new file mode 100644 index 0000000..ec7175e --- /dev/null +++ b/typesense_codegen/src/models/preset_upsert_schema.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct PresetUpsertSchema { + #[serde(rename = "value")] + pub value: Box, +} + +impl PresetUpsertSchema { + pub fn new(value: models::PresetUpsertSchemaValue) -> PresetUpsertSchema { + PresetUpsertSchema { + value: Box::new(value), + } + } +} + diff --git a/typesense_codegen/src/models/preset_upsert_schema_value.rs b/typesense_codegen/src/models/preset_upsert_schema_value.rs new file mode 100644 index 0000000..afb9398 --- /dev/null +++ b/typesense_codegen/src/models/preset_upsert_schema_value.rs @@ -0,0 +1,26 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum PresetUpsertSchemaValue { + SearchParameters(Box), + MultiSearchSearchesParameter(Box), +} + +impl Default for PresetUpsertSchemaValue { + fn default() -> Self { + Self::SearchParameters(Default::default()) + } +} + diff --git a/typesense_codegen/src/models/presets_retrieve_schema.rs b/typesense_codegen/src/models/presets_retrieve_schema.rs new file mode 100644 index 0000000..22e5b12 --- /dev/null +++ b/typesense_codegen/src/models/presets_retrieve_schema.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct PresetsRetrieveSchema { + #[serde(rename = "presets")] + pub presets: Vec, +} + +impl PresetsRetrieveSchema { + pub fn new(presets: Vec) -> PresetsRetrieveSchema { + PresetsRetrieveSchema { + presets, + } + } +} + diff --git a/typesense_codegen/src/models/schema_change_status.rs b/typesense_codegen/src/models/schema_change_status.rs new file mode 100644 index 0000000..3914832 --- /dev/null +++ b/typesense_codegen/src/models/schema_change_status.rs @@ -0,0 +1,36 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct SchemaChangeStatus { + /// Number of documents that have been altered + #[serde(rename = "altered_docs", skip_serializing_if = "Option::is_none")] + pub altered_docs: Option, + /// Name of the collection being modified + #[serde(rename = "collection", skip_serializing_if = "Option::is_none")] + pub collection: Option, + /// Number of documents that have been validated + #[serde(rename = "validated_docs", skip_serializing_if = "Option::is_none")] + pub validated_docs: Option, +} + +impl SchemaChangeStatus { + pub fn new() -> SchemaChangeStatus { + SchemaChangeStatus { + altered_docs: None, + collection: None, + validated_docs: None, + } + } +} + diff --git a/typesense_codegen/src/models/scoped_key_parameters.rs b/typesense_codegen/src/models/scoped_key_parameters.rs index 4a88ffd..c777764 100644 --- a/typesense_codegen/src/models/scoped_key_parameters.rs +++ b/typesense_codegen/src/models/scoped_key_parameters.rs @@ -3,24 +3,28 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ScopedKeyParameters { - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, #[serde(rename = "expires_at", skip_serializing_if = "Option::is_none")] pub expires_at: Option, + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, } impl ScopedKeyParameters { pub fn new() -> ScopedKeyParameters { ScopedKeyParameters { - filter_by: None, expires_at: None, + filter_by: None, } } } + diff --git a/typesense_codegen/src/models/search_grouped_hit.rs b/typesense_codegen/src/models/search_grouped_hit.rs index b9e19cd..80b53c3 100644 --- a/typesense_codegen/src/models/search_grouped_hit.rs +++ b/typesense_codegen/src/models/search_grouped_hit.rs @@ -3,31 +3,32 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchGroupedHit { +pub struct SearchGroupedHit { #[serde(rename = "found", skip_serializing_if = "Option::is_none")] pub found: Option, #[serde(rename = "group_key")] pub group_key: Vec, /// The documents that matched the search query #[serde(rename = "hits")] - pub hits: Vec>, + pub hits: Vec, } -impl SearchGroupedHit { - pub fn new( - group_key: Vec, - hits: Vec>, - ) -> Self { - Self { +impl SearchGroupedHit { + pub fn new(group_key: Vec, hits: Vec) -> SearchGroupedHit { + SearchGroupedHit { found: None, group_key, hits, } } } + diff --git a/typesense_codegen/src/models/search_highlight.rs b/typesense_codegen/src/models/search_highlight.rs index f5b7e2a..829685a 100644 --- a/typesense_codegen/src/models/search_highlight.rs +++ b/typesense_codegen/src/models/search_highlight.rs @@ -3,15 +3,23 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchHighlight { #[serde(rename = "field", skip_serializing_if = "Option::is_none")] pub field: Option, + /// The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field + #[serde(rename = "indices", skip_serializing_if = "Option::is_none")] + pub indices: Option>, + #[serde(rename = "matched_tokens", skip_serializing_if = "Option::is_none")] + pub matched_tokens: Option>, /// Present only for (non-array) string fields #[serde(rename = "snippet", skip_serializing_if = "Option::is_none")] pub snippet: Option, @@ -24,23 +32,19 @@ pub struct SearchHighlight { /// Full field value with highlighting, present only for (array) string[] fields #[serde(rename = "values", skip_serializing_if = "Option::is_none")] pub values: Option>, - /// The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field - #[serde(rename = "indices", skip_serializing_if = "Option::is_none")] - pub indices: Option>, - #[serde(rename = "matched_tokens", skip_serializing_if = "Option::is_none")] - pub matched_tokens: Option>, } impl SearchHighlight { pub fn new() -> SearchHighlight { SearchHighlight { field: None, + indices: None, + matched_tokens: None, snippet: None, snippets: None, value: None, values: None, - indices: None, - matched_tokens: None, } } } + diff --git a/typesense_codegen/src/models/search_override.rs b/typesense_codegen/src/models/search_override.rs index ed41718..ba68c84 100644 --- a/typesense_codegen/src/models/search_override.rs +++ b/typesense_codegen/src/models/search_override.rs @@ -3,43 +3,72 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchOverride { - #[serde(rename = "rule")] - pub rule: Box, - /// List of document `id`s that should be included in the search results with their corresponding `position`s. - #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] - pub includes: Option>, + /// A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. + #[serde(rename = "effective_from_ts", skip_serializing_if = "Option::is_none")] + pub effective_from_ts: Option, + /// A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. + #[serde(rename = "effective_to_ts", skip_serializing_if = "Option::is_none")] + pub effective_to_ts: Option, /// List of document `id`s that should be excluded from the search results. #[serde(rename = "excludes", skip_serializing_if = "Option::is_none")] - pub excludes: Option>, - /// A filter by clause that is applied to any search query that matches the override rule. + pub excludes: Option>, + /// A filter by clause that is applied to any search query that matches the override rule. #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] pub filter_by: Option, - /// Indicates whether search query tokens that exist in the override's rule should be removed from the search query. - #[serde( - rename = "remove_matched_tokens", - skip_serializing_if = "Option::is_none" - )] + /// When set to true, the filter conditions of the query are applied to the curated records as well. Default: false. + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// List of document `id`s that should be included in the search results with their corresponding `position`s. + #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] + pub includes: Option>, + /// Return a custom JSON object in the Search API response, when this rule is triggered. This can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// Indicates whether search query tokens that exist in the override's rule should be removed from the search query. + #[serde(rename = "remove_matched_tokens", skip_serializing_if = "Option::is_none")] + pub remove_matched_tokens: Option, + /// Replaces the current search query with this value, when the search query matches the override rule. + #[serde(rename = "replace_query", skip_serializing_if = "Option::is_none")] + pub replace_query: Option, + #[serde(rename = "rule")] + pub rule: Box, + /// A sort by clause that is applied to any search query that matches the override rule. + #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] + pub sort_by: Option, + /// When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence.
Overrides are processed in the lexical sort order of their id field. Default: true. + #[serde(rename = "stop_processing", skip_serializing_if = "Option::is_none")] + pub stop_processing: Option, #[serde(rename = "id")] pub id: String, } impl SearchOverride { - pub fn new(rule: crate::models::SearchOverrideRule, id: String) -> SearchOverride { + pub fn new(rule: models::SearchOverrideRule, id: String) -> SearchOverride { SearchOverride { - rule: Box::new(rule), - includes: None, + effective_from_ts: None, + effective_to_ts: None, excludes: None, filter_by: None, + filter_curated_hits: None, + includes: None, + metadata: None, remove_matched_tokens: None, + replace_query: None, + rule: Box::new(rule), + sort_by: None, + stop_processing: None, id, } } } + diff --git a/typesense_codegen/src/models/search_override_delete_response.rs b/typesense_codegen/src/models/search_override_delete_response.rs new file mode 100644 index 0000000..bc0aeb1 --- /dev/null +++ b/typesense_codegen/src/models/search_override_delete_response.rs @@ -0,0 +1,28 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct SearchOverrideDeleteResponse { + /// The id of the override that was deleted + #[serde(rename = "id")] + pub id: String, +} + +impl SearchOverrideDeleteResponse { + pub fn new(id: String) -> SearchOverrideDeleteResponse { + SearchOverrideDeleteResponse { + id, + } + } +} + diff --git a/typesense_codegen/src/models/search_override_exclude.rs b/typesense_codegen/src/models/search_override_exclude.rs index 1cea902..dddbf43 100644 --- a/typesense_codegen/src/models/search_override_exclude.rs +++ b/typesense_codegen/src/models/search_override_exclude.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchOverrideExclude { /// document id that should be excluded from the search results. @@ -17,6 +20,9 @@ pub struct SearchOverrideExclude { impl SearchOverrideExclude { pub fn new(id: String) -> SearchOverrideExclude { - SearchOverrideExclude { id } + SearchOverrideExclude { + id, + } } } + diff --git a/typesense_codegen/src/models/search_override_include.rs b/typesense_codegen/src/models/search_override_include.rs index 3cdbff2..fa10b74 100644 --- a/typesense_codegen/src/models/search_override_include.rs +++ b/typesense_codegen/src/models/search_override_include.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchOverrideInclude { /// document id that should be included @@ -20,6 +23,10 @@ pub struct SearchOverrideInclude { impl SearchOverrideInclude { pub fn new(id: String, position: i32) -> SearchOverrideInclude { - SearchOverrideInclude { id, position } + SearchOverrideInclude { + id, + position, + } } } + diff --git a/typesense_codegen/src/models/search_override_rule.rs b/typesense_codegen/src/models/search_override_rule.rs index f5a339b..a40125a 100644 --- a/typesense_codegen/src/models/search_override_rule.rs +++ b/typesense_codegen/src/models/search_override_rule.rs @@ -3,28 +3,41 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchOverrideRule { + /// Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, + /// Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. + #[serde(rename = "match", skip_serializing_if = "Option::is_none")] + pub r#match: Option, /// Indicates what search queries should be overridden - #[serde(rename = "query")] - pub query: String, - /// Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. - #[serde(rename = "match")] - pub r#match: Match, + #[serde(rename = "query", skip_serializing_if = "Option::is_none")] + pub query: Option, + /// List of tag values to associate with this override rule. + #[serde(rename = "tags", skip_serializing_if = "Option::is_none")] + pub tags: Option>, } impl SearchOverrideRule { - pub fn new(query: String, r#match: Match) -> SearchOverrideRule { - SearchOverrideRule { query, r#match } + pub fn new() -> SearchOverrideRule { + SearchOverrideRule { + filter_by: None, + r#match: None, + query: None, + tags: None, + } } } - -/// Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. +/// Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. 
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Match { #[serde(rename = "exact")] @@ -38,3 +51,4 @@ impl Default for Match { Self::Exact } } + diff --git a/typesense_codegen/src/models/search_override_schema.rs b/typesense_codegen/src/models/search_override_schema.rs index 7b3eedb..1b1f334 100644 --- a/typesense_codegen/src/models/search_override_schema.rs +++ b/typesense_codegen/src/models/search_override_schema.rs @@ -3,40 +3,69 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchOverrideSchema { - #[serde(rename = "rule")] - pub rule: Box, - /// List of document `id`s that should be included in the search results with their corresponding `position`s. - #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] - pub includes: Option>, + /// A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. + #[serde(rename = "effective_from_ts", skip_serializing_if = "Option::is_none")] + pub effective_from_ts: Option, + /// A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. + #[serde(rename = "effective_to_ts", skip_serializing_if = "Option::is_none")] + pub effective_to_ts: Option, /// List of document `id`s that should be excluded from the search results. #[serde(rename = "excludes", skip_serializing_if = "Option::is_none")] - pub excludes: Option>, - /// A filter by clause that is applied to any search query that matches the override rule. + pub excludes: Option>, + /// A filter by clause that is applied to any search query that matches the override rule. #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] pub filter_by: Option, - /// Indicates whether search query tokens that exist in the override's rule should be removed from the search query. - #[serde( - rename = "remove_matched_tokens", - skip_serializing_if = "Option::is_none" - )] + /// When set to true, the filter conditions of the query are applied to the curated records as well. Default: false. + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// List of document `id`s that should be included in the search results with their corresponding `position`s. + #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] + pub includes: Option>, + /// Return a custom JSON object in the Search API response, when this rule is triggered. This can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// Indicates whether search query tokens that exist in the override's rule should be removed from the search query. + #[serde(rename = "remove_matched_tokens", skip_serializing_if = "Option::is_none")] + pub remove_matched_tokens: Option, + /// Replaces the current search query with this value, when the search query matches the override rule.
+ #[serde(rename = "replace_query", skip_serializing_if = "Option::is_none")] + pub replace_query: Option, + #[serde(rename = "rule")] + pub rule: Box, + /// A sort by clause that is applied to any search query that matches the override rule. + #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] + pub sort_by: Option, + /// When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. + #[serde(rename = "stop_processing", skip_serializing_if = "Option::is_none")] + pub stop_processing: Option, } impl SearchOverrideSchema { - pub fn new(rule: crate::models::SearchOverrideRule) -> SearchOverrideSchema { + pub fn new(rule: models::SearchOverrideRule) -> SearchOverrideSchema { SearchOverrideSchema { - rule: Box::new(rule), - includes: None, + effective_from_ts: None, + effective_to_ts: None, excludes: None, filter_by: None, + filter_curated_hits: None, + includes: None, + metadata: None, remove_matched_tokens: None, + replace_query: None, + rule: Box::new(rule), + sort_by: None, + stop_processing: None, } } } + diff --git a/typesense_codegen/src/models/search_overrides_response.rs b/typesense_codegen/src/models/search_overrides_response.rs index 1c4e49b..50e900b 100644 --- a/typesense_codegen/src/models/search_overrides_response.rs +++ b/typesense_codegen/src/models/search_overrides_response.rs @@ -3,19 +3,25 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchOverridesResponse { #[serde(rename = "overrides")] - pub overrides: Vec, + pub overrides: Vec, } impl SearchOverridesResponse { - pub fn new(overrides: Vec) -> SearchOverridesResponse { - SearchOverridesResponse { overrides } + pub fn new(overrides: Vec) -> SearchOverridesResponse { + SearchOverridesResponse { + overrides, + } } } + diff --git a/typesense_codegen/src/models/search_parameters.rs b/typesense_codegen/src/models/search_parameters.rs index 3c1fb07..c6bf1c8 100644 --- a/typesense_codegen/src/models/search_parameters.rs +++ b/typesense_codegen/src/models/search_parameters.rs @@ -3,247 +3,289 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchParameters { - /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - #[serde(rename = "q")] - pub q: String, - /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - #[serde(rename = "query_by")] - pub query_by: String, - /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. 
- #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] - pub query_by_weights: Option, - /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. - #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] - pub text_match_type: Option, - /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, + /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] + pub cache_ttl: Option, + /// Enable conversational search. + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option, + /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] + pub conversation_id: Option, + /// The Id of Conversation Model to be used. + #[serde(rename = "conversation_model_id", skip_serializing_if = "Option::is_none")] + pub conversation_model_id: Option, + #[serde(rename = "drop_tokens_mode", skip_serializing_if = "Option::is_none")] + pub drop_tokens_mode: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + #[serde(rename = "drop_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub drop_tokens_threshold: Option, + /// Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true + #[serde(rename = "enable_highlight_v1", skip_serializing_if = "Option::is_none")] + pub enable_highlight_v1: Option, + /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] + pub enable_overrides: Option, + /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + #[serde(rename = "enable_synonyms", skip_serializing_if = "Option::is_none")] + pub enable_synonyms: Option, + /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + #[serde(rename = "enable_typos_for_alpha_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_alpha_numerical_tokens: Option, + /// Make Typesense disable typos for numerical tokens. 
+ #[serde(rename = "enable_typos_for_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_numerical_tokens: Option, + /// List of fields from the document to exclude in the search result + #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] + pub exclude_fields: Option, + /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] + pub exhaustive_search: Option, + /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] + pub facet_by: Option, + /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". + #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] + pub facet_query: Option, + /// Comma separated string of nested facet fields whose parent object should be returned in facet response. + #[serde(rename = "facet_return_parent", skip_serializing_if = "Option::is_none")] + pub facet_return_parent: Option, + /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] + pub facet_strategy: Option, + /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, + /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] + pub group_by: Option, + /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] + pub group_limit: Option, + /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + #[serde(rename = "group_missing_values", skip_serializing_if = "Option::is_none")] + pub group_missing_values: Option, + /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. 
Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] + pub hidden_hits: Option, + /// The number of tokens that should surround the highlighted text on each side. Default: 4 + #[serde(rename = "highlight_affix_num_tokens", skip_serializing_if = "Option::is_none")] + pub highlight_affix_num_tokens: Option, + /// The end tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] + pub highlight_end_tag: Option, + /// A list of custom fields that must be highlighted even if you don't query for them + #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] + pub highlight_fields: Option, + /// List of fields which should be highlighted fully without snippeting + #[serde(rename = "highlight_full_fields", skip_serializing_if = "Option::is_none")] + pub highlight_full_fields: Option, + /// The start tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_start_tag", skip_serializing_if = "Option::is_none")] + pub highlight_start_tag: Option, + /// List of fields from the document to include in the search result + #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] + pub include_fields: Option, /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] pub infix: Option, + /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// Control the number of words that Typesense considers for typo and prefix searching. + #[serde(rename = "max_candidates", skip_serializing_if = "Option::is_none")] + pub max_candidates: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_prefix", skip_serializing_if = "Option::is_none")] pub max_extra_prefix: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_suffix", skip_serializing_if = "Option::is_none")] pub max_extra_suffix: Option, - /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. 
The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` - #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, - /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] - pub facet_by: Option, /// Maximum number of facet values to be returned. #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] pub max_facet_values: Option, - /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". - #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] - pub facet_query: Option, - /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + /// Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. + #[serde(rename = "max_filter_by_candidates", skip_serializing_if = "Option::is_none")] + pub max_filter_by_candidates: Option, + /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] + pub min_len_1typo: Option, + /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] + pub min_len_2typo: Option, + /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 #[serde(rename = "num_typos", skip_serializing_if = "Option::is_none")] pub num_typos: Option, + /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] + pub offset: Option, + /// Comma separated list of tags to trigger the curations rules that match the tags. + #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] + pub override_tags: Option, /// Results from this specific page number would be fetched. #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, /// Number of results to fetch per page. Default: 10 #[serde(rename = "per_page", skip_serializing_if = "Option::is_none")] pub per_page: Option, - /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] - pub offset: Option, - /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. 
- #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] - pub group_by: Option, - /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 - #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] - pub group_limit: Option, - /// List of fields from the document to include in the search result - #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, - /// List of fields from the document to exclude in the search result - #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, - /// List of fields which should be highlighted fully without snippeting - #[serde( - rename = "highlight_full_fields", - skip_serializing_if = "Option::is_none" - )] - pub highlight_full_fields: Option, - /// The number of tokens that should surround the highlighted text on each side. Default: 4 - #[serde( - rename = "highlight_affix_num_tokens", - skip_serializing_if = "Option::is_none" - )] - pub highlight_affix_num_tokens: Option, - /// The start tag used for the highlighted snippets. Default: `` - #[serde( - rename = "highlight_start_tag", - skip_serializing_if = "Option::is_none" - )] - pub highlight_start_tag: Option, - /// The end tag used for the highlighted snippets. Default: `` - #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] - pub highlight_end_tag: Option, - /// Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true - #[serde( - rename = "enable_highlight_v1", - skip_serializing_if = "Option::is_none" - )] - pub enable_highlight_v1: Option, - /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] - pub snippet_threshold: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - #[serde( - rename = "drop_tokens_threshold", - skip_serializing_if = "Option::is_none" - )] - pub drop_tokens_threshold: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - #[serde( - rename = "typo_tokens_threshold", - skip_serializing_if = "Option::is_none" - )] - pub typo_tokens_threshold: Option, - /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. 
Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "pinned_hits", skip_serializing_if = "Option::is_none")] pub pinned_hits: Option, - /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] - pub hidden_hits: Option, - /// A list of custom fields that must be highlighted even if you don't query for them - #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] - pub highlight_fields: Option, - /// Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. - #[serde(rename = "split_join_tokens", skip_serializing_if = "Option::is_none")] - pub split_join_tokens: Option, - /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same - #[serde( - rename = "pre_segmented_query", - skip_serializing_if = "Option::is_none" - )] + /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same + #[serde(rename = "pre_segmented_query", skip_serializing_if = "Option::is_none")] pub pre_segmented_query: Option, - /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] + pub prefix: Option, + /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. #[serde(rename = "preset", skip_serializing_if = "Option::is_none")] pub preset: Option, - /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] - pub enable_overrides: Option, - /// Set this parameter to true to ensure that an exact match is ranked above the others - #[serde( - rename = "prioritize_exact_match", - skip_serializing_if = "Option::is_none" - )] + /// Set this parameter to true to ensure that an exact match is ranked above the others + #[serde(rename = "prioritize_exact_match", skip_serializing_if = "Option::is_none")] pub prioritize_exact_match: Option, - /// Control the number of words that Typesense considers for typo and prefix searching. 
- #[serde(rename = "max_candidates", skip_serializing_if = "Option::is_none")] - pub max_candidates: Option, - /// Make Typesense prioritize documents where the query words appear earlier in the text. - #[serde( - rename = "prioritize_token_position", - skip_serializing_if = "Option::is_none" - )] + /// Make Typesense prioritize documents where the query words appear in more number of fields. + #[serde(rename = "prioritize_num_matching_fields", skip_serializing_if = "Option::is_none")] + pub prioritize_num_matching_fields: Option, + /// Make Typesense prioritize documents where the query words appear earlier in the text. + #[serde(rename = "prioritize_token_position", skip_serializing_if = "Option::is_none")] pub prioritize_token_position: Option, - /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] - pub exhaustive_search: Option, - /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. + /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + #[serde(rename = "q", skip_serializing_if = "Option::is_none")] + pub q: Option, + /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] + pub query_by: Option, + /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] + pub query_by_weights: Option, + /// Number of times to retry fetching remote embeddings. + #[serde(rename = "remote_embedding_num_tries", skip_serializing_if = "Option::is_none")] + pub remote_embedding_num_tries: Option, + /// Timeout (in milliseconds) for fetching remote embeddings. + #[serde(rename = "remote_embedding_timeout_ms", skip_serializing_if = "Option::is_none")] + pub remote_embedding_timeout_ms: Option, + /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. #[serde(rename = "search_cutoff_ms", skip_serializing_if = "Option::is_none")] pub search_cutoff_ms: Option, - /// Enable server side caching of search query results. By default, caching is disabled. + /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] + pub snippet_threshold: Option, + /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. 
If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] + pub sort_by: Option, + /// Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. + #[serde(rename = "split_join_tokens", skip_serializing_if = "Option::is_none")] + pub split_join_tokens: Option, + /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] + pub stopwords: Option, + /// Allow synonym resolution on typo-corrected words in the query. Default: 0 + #[serde(rename = "synonym_num_typos", skip_serializing_if = "Option::is_none")] + pub synonym_num_typos: Option, + /// Allow synonym resolution on word prefixes in the query. Default: false + #[serde(rename = "synonym_prefix", skip_serializing_if = "Option::is_none")] + pub synonym_prefix: Option, + /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] + pub text_match_type: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + #[serde(rename = "typo_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub typo_tokens_threshold: Option, + /// Enable server side caching of search query results. By default, caching is disabled. #[serde(rename = "use_cache", skip_serializing_if = "Option::is_none")] pub use_cache: Option, - /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] - pub cache_ttl: Option, - /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] - pub min_len_1typo: Option, - /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] - pub min_len_2typo: Option, - /// Vector query expression for fetching documents \"closest\" to a given query/document vector. + /// Vector query expression for fetching documents \"closest\" to a given query/document vector. #[serde(rename = "vector_query", skip_serializing_if = "Option::is_none")] pub vector_query: Option, - /// Timeout (in milliseconds) for fetching remote embeddings. - #[serde( - rename = "remote_embedding_timeout_ms", - skip_serializing_if = "Option::is_none" - )] - pub remote_embedding_timeout_ms: Option, - /// Number of times to retry fetching remote embeddings. - #[serde( - rename = "remote_embedding_num_tries", - skip_serializing_if = "Option::is_none" - )] - pub remote_embedding_num_tries: Option, + /// The base64 encoded audio file in 16 khz 16-bit WAV format. 
+ #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] + pub voice_query: Option, } impl SearchParameters { - pub fn new(q: String, query_by: String) -> SearchParameters { + pub fn new() -> SearchParameters { SearchParameters { - q, - query_by, - query_by_weights: None, - text_match_type: None, - prefix: None, + cache_ttl: None, + conversation: None, + conversation_id: None, + conversation_model_id: None, + drop_tokens_mode: None, + drop_tokens_threshold: None, + enable_highlight_v1: None, + enable_overrides: None, + enable_synonyms: None, + enable_typos_for_alpha_numerical_tokens: None, + enable_typos_for_numerical_tokens: None, + exclude_fields: None, + exhaustive_search: None, + facet_by: None, + facet_query: None, + facet_return_parent: None, + facet_strategy: None, + filter_by: None, + filter_curated_hits: None, + group_by: None, + group_limit: None, + group_missing_values: None, + hidden_hits: None, + highlight_affix_num_tokens: None, + highlight_end_tag: None, + highlight_fields: None, + highlight_full_fields: None, + highlight_start_tag: None, + include_fields: None, infix: None, + limit: None, + max_candidates: None, max_extra_prefix: None, max_extra_suffix: None, - filter_by: None, - sort_by: None, - facet_by: None, max_facet_values: None, - facet_query: None, + max_filter_by_candidates: None, + min_len_1typo: None, + min_len_2typo: None, num_typos: None, + offset: None, + override_tags: None, page: None, per_page: None, - limit: None, - offset: None, - group_by: None, - group_limit: None, - include_fields: None, - exclude_fields: None, - highlight_full_fields: None, - highlight_affix_num_tokens: None, - highlight_start_tag: None, - highlight_end_tag: None, - enable_highlight_v1: None, - snippet_threshold: None, - drop_tokens_threshold: None, - typo_tokens_threshold: None, pinned_hits: None, - hidden_hits: None, - highlight_fields: None, - split_join_tokens: None, pre_segmented_query: None, + prefix: None, preset: None, - enable_overrides: None, prioritize_exact_match: None, - max_candidates: None, + prioritize_num_matching_fields: None, prioritize_token_position: None, - exhaustive_search: None, + q: None, + query_by: None, + query_by_weights: None, + remote_embedding_num_tries: None, + remote_embedding_timeout_ms: None, search_cutoff_ms: None, + snippet_threshold: None, + sort_by: None, + split_join_tokens: None, + stopwords: None, + synonym_num_typos: None, + synonym_prefix: None, + text_match_type: None, + typo_tokens_threshold: None, use_cache: None, - cache_ttl: None, - min_len_1typo: None, - min_len_2typo: None, vector_query: None, - remote_embedding_timeout_ms: None, - remote_embedding_num_tries: None, + voice_query: None, } } } + diff --git a/typesense_codegen/src/models/search_result.rs b/typesense_codegen/src/models/search_result.rs index 11527ab..fe158cd 100644 --- a/typesense_codegen/src/models/search_result.rs +++ b/typesense_codegen/src/models/search_result.rs @@ -3,51 +3,61 @@ * * An open source search engine for building delightful search experiences. 
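Since `new()` above no longer takes `q`/`query_by`, a minimal usage sketch (illustrative only, not generated output; the option payloads are assumed to be the spec's plain `String` values) is to set the fields directly and default the rest:

use typesense_codegen::models::SearchParameters;

fn example_search_params() -> SearchParameters {
    SearchParameters {
        q: Some("shoe".to_owned()),
        query_by: Some("name,description".to_owned()),
        // `record_id:hit_position` pairs: pin ID 123 at position 1 and ID 456 at position 5.
        pinned_hits: Some("123:1,456:5".to_owned()),
        ..SearchParameters::new()
    }
}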
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchResult { +pub struct SearchResult { + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option>, #[serde(rename = "facet_counts", skip_serializing_if = "Option::is_none")] - pub facet_counts: Option>, + pub facet_counts: Option>, /// The number of documents found #[serde(rename = "found", skip_serializing_if = "Option::is_none")] pub found: Option, - /// The number of milliseconds the search took - #[serde(rename = "search_time_ms", skip_serializing_if = "Option::is_none")] - pub search_time_ms: Option, + #[serde(rename = "found_docs", skip_serializing_if = "Option::is_none")] + pub found_docs: Option, + #[serde(rename = "grouped_hits", skip_serializing_if = "Option::is_none")] + pub grouped_hits: Option>, + /// The documents that matched the search query + #[serde(rename = "hits", skip_serializing_if = "Option::is_none")] + pub hits: Option>, /// The total number of documents in the collection #[serde(rename = "out_of", skip_serializing_if = "Option::is_none")] pub out_of: Option, - /// Whether the search was cut off - #[serde(rename = "search_cutoff", skip_serializing_if = "Option::is_none")] - pub search_cutoff: Option, /// The search result page number #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, - #[serde(rename = "grouped_hits", skip_serializing_if = "Option::is_none")] - pub grouped_hits: Option>>, - /// The documents that matched the search query - #[serde(rename = "hits", skip_serializing_if = "Option::is_none")] - pub hits: Option>>, #[serde(rename = "request_params", skip_serializing_if = "Option::is_none")] - pub request_params: Option>, + pub request_params: Option>, + /// Whether the search was cut off + #[serde(rename = "search_cutoff", skip_serializing_if = "Option::is_none")] + pub search_cutoff: Option, + /// The number of milliseconds the search took + #[serde(rename = "search_time_ms", skip_serializing_if = "Option::is_none")] + pub search_time_ms: Option, } -impl SearchResult { - pub fn new() -> Self { - Self { +impl SearchResult { + pub fn new() -> SearchResult { + SearchResult { + conversation: None, facet_counts: None, found: None, - search_time_ms: None, - out_of: None, - search_cutoff: None, - page: None, + found_docs: None, grouped_hits: None, hits: None, + out_of: None, + page: None, request_params: None, + search_cutoff: None, + search_time_ms: None, } } } + diff --git a/typesense_codegen/src/models/search_result_conversation.rs b/typesense_codegen/src/models/search_result_conversation.rs new file mode 100644 index 0000000..fc3b51a --- /dev/null +++ b/typesense_codegen/src/models/search_result_conversation.rs @@ -0,0 +1,36 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
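A short reading sketch for the regenerated `SearchResult` (illustrative; the payload types are assumed to be the `models::*` types named in the field list): `hits` stays optional, and the RAG answer arrives through the new `conversation` field whose model follows below.

use typesense_codegen::models::SearchResult;

fn summarize(result: &SearchResult) -> (usize, Option<String>) {
    // Treat a missing hits array the same as an empty one.
    let hit_count = result.hits.as_ref().map_or(0, |hits| hits.len());
    // Present only when the search ran with a conversation model.
    let answer = result.conversation.as_ref().map(|c| c.answer.clone());
    (hit_count, answer)
}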
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct SearchResultConversation { + #[serde(rename = "answer")] + pub answer: String, + #[serde(rename = "conversation_history")] + pub conversation_history: Vec, + #[serde(rename = "conversation_id")] + pub conversation_id: String, + #[serde(rename = "query")] + pub query: String, +} + +impl SearchResultConversation { + pub fn new(answer: String, conversation_history: Vec, conversation_id: String, query: String) -> SearchResultConversation { + SearchResultConversation { + answer, + conversation_history, + conversation_id, + query, + } + } +} + diff --git a/typesense_codegen/src/models/search_result_hit.rs b/typesense_codegen/src/models/search_result_hit.rs index d637e65..3b25305 100644 --- a/typesense_codegen/src/models/search_result_hit.rs +++ b/typesense_codegen/src/models/search_result_hit.rs @@ -3,44 +3,48 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct SearchResultHit { - /// (Deprecated) Contains highlighted portions of the search fields - #[serde(rename = "highlights", skip_serializing_if = "Option::is_none")] - pub highlights: Option>, - /// Highlighted version of the matching document - #[serde(rename = "highlight", skip_serializing_if = "Option::is_none")] - pub highlight: Option<::std::collections::HashMap>, +pub struct SearchResultHit { /// Can be any key-value pair #[serde(rename = "document", skip_serializing_if = "Option::is_none")] - pub document: Option, + pub document: Option, + /// Can be any key-value pair + #[serde(rename = "geo_distance_meters", skip_serializing_if = "Option::is_none")] + pub geo_distance_meters: Option>, + /// Highlighted version of the matching document + #[serde(rename = "highlight", skip_serializing_if = "Option::is_none")] + pub highlight: Option>, + /// (Deprecated) Contains highlighted portions of the search fields + #[serde(rename = "highlights", skip_serializing_if = "Option::is_none")] + pub highlights: Option>, #[serde(rename = "text_match", skip_serializing_if = "Option::is_none")] pub text_match: Option, - /// Can be any key-value pair - #[serde( - rename = "geo_distance_meters", - skip_serializing_if = "Option::is_none" - )] - pub geo_distance_meters: Option<::std::collections::HashMap>, + #[serde(rename = "text_match_info", skip_serializing_if = "Option::is_none")] + pub text_match_info: Option>, /// Distance between the query vector and matching document's vector value #[serde(rename = "vector_distance", skip_serializing_if = "Option::is_none")] pub vector_distance: Option, } -impl SearchResultHit { - pub fn new() -> Self { - Self { - highlights: None, - highlight: None, +impl SearchResultHit { + pub fn new() -> SearchResultHit { + SearchResultHit { document: None, - text_match: None, geo_distance_meters: None, + highlight: None, + highlights: None, + text_match: None, + text_match_info: None, vector_distance: None, } } } + diff --git a/typesense_codegen/src/models/search_result_hit_text_match_info.rs b/typesense_codegen/src/models/search_result_hit_text_match_info.rs new 
file mode 100644 index 0000000..c1a7d1f --- /dev/null +++ b/typesense_codegen/src/models/search_result_hit_text_match_info.rs @@ -0,0 +1,45 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct SearchResultHitTextMatchInfo { + #[serde(rename = "best_field_score", skip_serializing_if = "Option::is_none")] + pub best_field_score: Option, + #[serde(rename = "best_field_weight", skip_serializing_if = "Option::is_none")] + pub best_field_weight: Option, + #[serde(rename = "fields_matched", skip_serializing_if = "Option::is_none")] + pub fields_matched: Option, + #[serde(rename = "num_tokens_dropped", skip_serializing_if = "Option::is_none")] + pub num_tokens_dropped: Option, + #[serde(rename = "score", skip_serializing_if = "Option::is_none")] + pub score: Option, + #[serde(rename = "tokens_matched", skip_serializing_if = "Option::is_none")] + pub tokens_matched: Option, + #[serde(rename = "typo_prefix_score", skip_serializing_if = "Option::is_none")] + pub typo_prefix_score: Option, +} + +impl SearchResultHitTextMatchInfo { + pub fn new() -> SearchResultHitTextMatchInfo { + SearchResultHitTextMatchInfo { + best_field_score: None, + best_field_weight: None, + fields_matched: None, + num_tokens_dropped: None, + score: None, + tokens_matched: None, + typo_prefix_score: None, + } + } +} + diff --git a/typesense_codegen/src/models/search_result_request_params.rs b/typesense_codegen/src/models/search_result_request_params.rs index 94e323b..eb44758 100644 --- a/typesense_codegen/src/models/search_result_request_params.rs +++ b/typesense_codegen/src/models/search_result_request_params.rs @@ -3,27 +3,34 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchResultRequestParams { #[serde(rename = "collection_name")] pub collection_name: String, - #[serde(rename = "q")] - pub q: String, #[serde(rename = "per_page")] pub per_page: i32, + #[serde(rename = "q")] + pub q: String, + #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] + pub voice_query: Option>, } impl SearchResultRequestParams { - pub fn new(collection_name: String, q: String, per_page: i32) -> SearchResultRequestParams { + pub fn new(collection_name: String, per_page: i32, q: String) -> SearchResultRequestParams { SearchResultRequestParams { collection_name, - q, per_page, + q, + voice_query: None, } } } + diff --git a/typesense_codegen/src/models/search_result_request_params_voice_query.rs b/typesense_codegen/src/models/search_result_request_params_voice_query.rs new file mode 100644 index 0000000..87ce57b --- /dev/null +++ b/typesense_codegen/src/models/search_result_request_params_voice_query.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct SearchResultRequestParamsVoiceQuery { + #[serde(rename = "transcribed_query", skip_serializing_if = "Option::is_none")] + pub transcribed_query: Option, +} + +impl SearchResultRequestParamsVoiceQuery { + pub fn new() -> SearchResultRequestParamsVoiceQuery { + SearchResultRequestParamsVoiceQuery { + transcribed_query: None, + } + } +} + diff --git a/typesense_codegen/src/models/search_synonym.rs b/typesense_codegen/src/models/search_synonym.rs index acf9fd8..a252b0b 100644 --- a/typesense_codegen/src/models/search_synonym.rs +++ b/typesense_codegen/src/models/search_synonym.rs @@ -3,16 +3,25 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchSynonym { + /// Locale for the synonym, leave blank to use the standard tokenizer. + #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] + pub locale: Option, /// For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. #[serde(rename = "root", skip_serializing_if = "Option::is_none")] pub root: Option, + /// By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. + #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] + pub symbols_to_index: Option>, /// Array of words that should be considered as synonyms. #[serde(rename = "synonyms")] pub synonyms: Vec, @@ -23,9 +32,12 @@ pub struct SearchSynonym { impl SearchSynonym { pub fn new(synonyms: Vec, id: String) -> SearchSynonym { SearchSynonym { + locale: None, root: None, + symbols_to_index: None, synonyms, id, } } } + diff --git a/typesense_codegen/src/models/search_synonym_delete_response.rs b/typesense_codegen/src/models/search_synonym_delete_response.rs new file mode 100644 index 0000000..06ff769 --- /dev/null +++ b/typesense_codegen/src/models/search_synonym_delete_response.rs @@ -0,0 +1,28 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct SearchSynonymDeleteResponse { + /// The id of the synonym that was deleted + #[serde(rename = "id")] + pub id: String, +} + +impl SearchSynonymDeleteResponse { + pub fn new(id: String) -> SearchSynonymDeleteResponse { + SearchSynonymDeleteResponse { + id, + } + } +} + diff --git a/typesense_codegen/src/models/search_synonym_schema.rs b/typesense_codegen/src/models/search_synonym_schema.rs index eab5596..a6d9923 100644 --- a/typesense_codegen/src/models/search_synonym_schema.rs +++ b/typesense_codegen/src/models/search_synonym_schema.rs @@ -3,16 +3,25 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchSynonymSchema { + /// Locale for the synonym, leave blank to use the standard tokenizer. + #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] + pub locale: Option, /// For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. #[serde(rename = "root", skip_serializing_if = "Option::is_none")] pub root: Option, + /// By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. + #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] + pub symbols_to_index: Option>, /// Array of words that should be considered as synonyms. #[serde(rename = "synonyms")] pub synonyms: Vec, @@ -21,8 +30,11 @@ pub struct SearchSynonymSchema { impl SearchSynonymSchema { pub fn new(synonyms: Vec) -> SearchSynonymSchema { SearchSynonymSchema { + locale: None, root: None, + symbols_to_index: None, synonyms, } } } + diff --git a/typesense_codegen/src/models/search_synonyms_response.rs b/typesense_codegen/src/models/search_synonyms_response.rs index a3fefdf..d791123 100644 --- a/typesense_codegen/src/models/search_synonyms_response.rs +++ b/typesense_codegen/src/models/search_synonyms_response.rs @@ -3,19 +3,25 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchSynonymsResponse { #[serde(rename = "synonyms")] - pub synonyms: Vec, + pub synonyms: Vec, } impl SearchSynonymsResponse { - pub fn new(synonyms: Vec) -> SearchSynonymsResponse { - SearchSynonymsResponse { synonyms } + pub fn new(synonyms: Vec) -> SearchSynonymsResponse { + SearchSynonymsResponse { + synonyms, + } } } + diff --git a/typesense_codegen/src/models/snapshot_parameters.rs b/typesense_codegen/src/models/snapshot_parameters.rs index aa2c6a5..4a989ad 100644 --- a/typesense_codegen/src/models/snapshot_parameters.rs +++ b/typesense_codegen/src/models/snapshot_parameters.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SnapshotParameters { #[serde(rename = "snapshot_path", skip_serializing_if = "Option::is_none")] @@ -21,3 +24,4 @@ impl SnapshotParameters { } } } + diff --git a/typesense_codegen/src/models/stemming_dictionary.rs b/typesense_codegen/src/models/stemming_dictionary.rs new file mode 100644 index 0000000..78d997f --- /dev/null +++ b/typesense_codegen/src/models/stemming_dictionary.rs @@ -0,0 +1,32 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
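To illustrate the two new optional synonym attributes (`locale` and `symbols_to_index`) added above, a hypothetical construction, assuming the vectors carry plain `String`s as in the spec:

use typesense_codegen::models::SearchSynonymSchema;

fn example_synonym() -> SearchSynonymSchema {
    let mut synonym =
        SearchSynonymSchema::new(vec!["sneaker".to_owned(), "running shoe".to_owned()]);
    synonym.root = Some("shoe".to_owned()); // optional: make it a one-way synonym
    synonym.locale = Some("en".to_owned());
    synonym.symbols_to_index = Some(vec!["-".to_owned()]); // index '-' instead of dropping it
    synonym
}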
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct StemmingDictionary { + /// Unique identifier for the dictionary + #[serde(rename = "id")] + pub id: String, + /// List of word mappings in the dictionary + #[serde(rename = "words")] + pub words: Vec, +} + +impl StemmingDictionary { + pub fn new(id: String, words: Vec) -> StemmingDictionary { + StemmingDictionary { + id, + words, + } + } +} + diff --git a/typesense_codegen/src/models/stemming_dictionary_words_inner.rs b/typesense_codegen/src/models/stemming_dictionary_words_inner.rs new file mode 100644 index 0000000..ef6c067 --- /dev/null +++ b/typesense_codegen/src/models/stemming_dictionary_words_inner.rs @@ -0,0 +1,32 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct StemmingDictionaryWordsInner { + /// The root form of the word + #[serde(rename = "root")] + pub root: String, + /// The word form to be stemmed + #[serde(rename = "word")] + pub word: String, +} + +impl StemmingDictionaryWordsInner { + pub fn new(root: String, word: String) -> StemmingDictionaryWordsInner { + StemmingDictionaryWordsInner { + root, + word, + } + } +} + diff --git a/typesense_codegen/src/models/stopwords_set_retrieve_schema.rs b/typesense_codegen/src/models/stopwords_set_retrieve_schema.rs new file mode 100644 index 0000000..d4ec0c8 --- /dev/null +++ b/typesense_codegen/src/models/stopwords_set_retrieve_schema.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct StopwordsSetRetrieveSchema { + #[serde(rename = "stopwords")] + pub stopwords: Box, +} + +impl StopwordsSetRetrieveSchema { + pub fn new(stopwords: models::StopwordsSetSchema) -> StopwordsSetRetrieveSchema { + StopwordsSetRetrieveSchema { + stopwords: Box::new(stopwords), + } + } +} + diff --git a/typesense_codegen/src/models/stopwords_set_schema.rs b/typesense_codegen/src/models/stopwords_set_schema.rs new file mode 100644 index 0000000..1a9e9c9 --- /dev/null +++ b/typesense_codegen/src/models/stopwords_set_schema.rs @@ -0,0 +1,33 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
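The argument order of the new stemming constructors is easy to misread, so a brief sketch (the dictionary id and word pair are made-up examples): `StemmingDictionaryWordsInner::new` takes the root form first and the word to be stemmed second.

use typesense_codegen::models::{StemmingDictionary, StemmingDictionaryWordsInner};

fn example_dictionary() -> StemmingDictionary {
    StemmingDictionary::new(
        "irregular-plurals".to_owned(),
        vec![
            // (root, word): "people" stems to "person".
            StemmingDictionaryWordsInner::new("person".to_owned(), "people".to_owned()),
        ],
    )
}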
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct StopwordsSetSchema { + #[serde(rename = "id")] + pub id: String, + #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] + pub locale: Option, + #[serde(rename = "stopwords")] + pub stopwords: Vec, +} + +impl StopwordsSetSchema { + pub fn new(id: String, stopwords: Vec) -> StopwordsSetSchema { + StopwordsSetSchema { + id, + locale: None, + stopwords, + } + } +} + diff --git a/typesense_codegen/src/models/stopwords_set_upsert_schema.rs b/typesense_codegen/src/models/stopwords_set_upsert_schema.rs new file mode 100644 index 0000000..9900fb8 --- /dev/null +++ b/typesense_codegen/src/models/stopwords_set_upsert_schema.rs @@ -0,0 +1,30 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct StopwordsSetUpsertSchema { + #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] + pub locale: Option, + #[serde(rename = "stopwords")] + pub stopwords: Vec, +} + +impl StopwordsSetUpsertSchema { + pub fn new(stopwords: Vec) -> StopwordsSetUpsertSchema { + StopwordsSetUpsertSchema { + locale: None, + stopwords, + } + } +} + diff --git a/typesense_codegen/src/models/stopwords_sets_retrieve_all_schema.rs b/typesense_codegen/src/models/stopwords_sets_retrieve_all_schema.rs new file mode 100644 index 0000000..06344c6 --- /dev/null +++ b/typesense_codegen/src/models/stopwords_sets_retrieve_all_schema.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct StopwordsSetsRetrieveAllSchema { + #[serde(rename = "stopwords")] + pub stopwords: Vec, +} + +impl StopwordsSetsRetrieveAllSchema { + pub fn new(stopwords: Vec) -> StopwordsSetsRetrieveAllSchema { + StopwordsSetsRetrieveAllSchema { + stopwords, + } + } +} + diff --git a/typesense_codegen/src/models/success_status.rs b/typesense_codegen/src/models/success_status.rs index b7d3724..ee2ad94 100644 --- a/typesense_codegen/src/models/success_status.rs +++ b/typesense_codegen/src/models/success_status.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. 
* - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SuccessStatus { #[serde(rename = "success")] @@ -16,6 +19,9 @@ pub struct SuccessStatus { impl SuccessStatus { pub fn new(success: bool) -> SuccessStatus { - SuccessStatus { success } + SuccessStatus { + success, + } } } + diff --git a/typesense_codegen/src/models/update_documents_200_response.rs b/typesense_codegen/src/models/update_documents_200_response.rs index eff0990..e4be1a3 100644 --- a/typesense_codegen/src/models/update_documents_200_response.rs +++ b/typesense_codegen/src/models/update_documents_200_response.rs @@ -3,11 +3,14 @@ * * An open source search engine for building delightful search experiences. * - * The version of the OpenAPI document: 0.25.0 - * + * The version of the OpenAPI document: 28.0 + * * Generated by: https://openapi-generator.tech */ +use crate::models; +use serde::{Deserialize, Serialize}; + #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct UpdateDocuments200Response { /// The number of documents that have been updated @@ -17,6 +20,9 @@ pub struct UpdateDocuments200Response { impl UpdateDocuments200Response { pub fn new(num_updated: i32) -> UpdateDocuments200Response { - UpdateDocuments200Response { num_updated } + UpdateDocuments200Response { + num_updated, + } } } + diff --git a/typesense_codegen/src/models/update_documents_update_documents_parameters_parameter.rs b/typesense_codegen/src/models/update_documents_update_documents_parameters_parameter.rs deleted file mode 100644 index aacf476..0000000 --- a/typesense_codegen/src/models/update_documents_update_documents_parameters_parameter.rs +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Typesense API - * - * An open source search engine for building delightful search experiences. - * - * The version of the OpenAPI document: 0.25.0 - * - * Generated by: https://openapi-generator.tech - */ - -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct UpdateDocumentsUpdateDocumentsParametersParameter { - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, -} - -impl UpdateDocumentsUpdateDocumentsParametersParameter { - pub fn new() -> UpdateDocumentsUpdateDocumentsParametersParameter { - UpdateDocumentsUpdateDocumentsParametersParameter { filter_by: None } - } -} diff --git a/typesense_codegen/src/models/voice_query_model_collection_config.rs b/typesense_codegen/src/models/voice_query_model_collection_config.rs new file mode 100644 index 0000000..0e97ec0 --- /dev/null +++ b/typesense_codegen/src/models/voice_query_model_collection_config.rs @@ -0,0 +1,29 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +/// VoiceQueryModelCollectionConfig : Configuration for the voice query model +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct VoiceQueryModelCollectionConfig { + #[serde(rename = "model_name", skip_serializing_if = "Option::is_none")] + pub model_name: Option, +} + +impl VoiceQueryModelCollectionConfig { + /// Configuration for the voice query model + pub fn new() -> VoiceQueryModelCollectionConfig { + VoiceQueryModelCollectionConfig { + model_name: None, + } + } +} + diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml new file mode 100644 index 0000000..e5e1a5d --- /dev/null +++ b/xtask/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "xtask" +version = "0.1.0" +edition = "2018" + +[dependencies] +reqwest = { version = "0.11", features = ["blocking"] } # "blocking" is simpler for scripts +anyhow = "1.0" +clap = { version = "4.0", features = ["derive"] } diff --git a/xtask/src/main.rs b/xtask/src/main.rs new file mode 100644 index 0000000..7da5009 --- /dev/null +++ b/xtask/src/main.rs @@ -0,0 +1,124 @@ +use anyhow::{Context, Result}; +use clap::{Parser, ValueEnum}; +use std::env; +use std::fs; +use std::process::Command; + +const SPEC_URL: &str = + "https://raw.githubusercontent.com/typesense/typesense-go/master/typesense/api/generator/generator.yml"; +// Input spec file, expected in the project root. + +const INPUT_SPEC_FILE: &str = "typesense-go-unwrapped-api-spec.yaml"; + +// Output directory for the generated code. +const OUTPUT_DIR: &str = "typesense_codegen"; + +// 1. Define the command-line interface using clap's derive macros +#[derive(Parser)] +#[command( + author, + version, + about = "A task runner for the typesense-rust project" +)] +struct Cli { + /// The list of tasks to run in sequence. + #[arg(required = true, value_enum)] + tasks: Vec, +} + +// 2. Define the available tasks as a simple enum +#[derive(ValueEnum, Clone, Debug)] +#[clap(rename_all = "kebab-case")] // Allows user to type `code-gen` instead of `CodeGen` +enum Task { + /// Fetches the latest OpenAPI spec from the Typesense repository. + Fetch, + /// Generates client code from the spec file using the Docker container. + CodeGen, +} + +// 3. The main function now parses the CLI arguments and loops through the requested tasks +fn main() -> Result<()> { + let cli = Cli::parse(); + + for task in cli.tasks { + println!("▶️ Running task: {:?}", task); + match task { + Task::Fetch => task_fetch_api_spec()?, + Task::CodeGen => task_codegen()?, + } + } + Ok(()) +} + +fn task_fetch_api_spec() -> Result<()> { + println!("▶️ Running fetch API spec task..."); + + // 1. Fetch the OpenAPI spec + println!(" - Downloading spec from {}", SPEC_URL); + let response = + reqwest::blocking::get(SPEC_URL).context("Failed to download OpenAPI spec file")?; + + if !response.status().is_success() { + anyhow::bail!("Failed to download spec: HTTP {}", response.status()); + } + + let spec_content = response.text()?; + fs::write(INPUT_SPEC_FILE, spec_content) + .context(format!("Failed to write spec to {}", INPUT_SPEC_FILE))?; + println!(" - Spec saved to {}", INPUT_SPEC_FILE); + + println!("✅ Fetch API spec task finished successfully."); + + Ok(()) +} + +/// Task to generate client code from the OpenAPI spec using a Docker container. +fn task_codegen() -> Result<()> { + println!("▶️ Running codegen task via Docker..."); + + // 1. 
Get the absolute path to the project's root directory. + // std::env::current_dir() gives us the directory from which `cargo xtask` was run. + let project_root = env::current_dir().context("Failed to get current directory")?; + + // Check if the input spec file exists before trying to run Docker. + let input_spec_path = project_root.join(INPUT_SPEC_FILE); + if !input_spec_path.exists() { + anyhow::bail!( + "Input spec '{}' not found in project root. Please add it before running.", + INPUT_SPEC_FILE + ); + } + + // 2. Construct the volume mount string for Docker. + // Docker needs an absolute path for the volume mount source. + // to_string_lossy() is used to handle potential non-UTF8 paths gracefully. + let volume_mount = format!("{}:/local", project_root.to_string_lossy()); + println!(" - Using volume mount: {}", volume_mount); + + // 4. Set up and run the Docker command. + println!(" - Starting Docker container..."); + let status = Command::new("docker") + .arg("run") + .arg("--rm") // Remove the container after it exits + .arg("-v") + .arg(volume_mount) // Mount the project root to /local in the container + .arg("openapitools/openapi-generator-cli") + .arg("generate") + .arg("-i") + .arg(format!("/local/{}", INPUT_SPEC_FILE)) // Input path inside the container + .arg("-g") + .arg("rust") // The language generator + .arg("-o") + .arg(format!("/local/{}", OUTPUT_DIR)) // Output path inside the container + .status() // Execute the command and wait for it to finish + .context("Failed to execute Docker command. Is Docker installed and running?")?; + + // 5. Check if the command was successful. + if !status.success() { + anyhow::bail!("Docker command failed with status: {}", status); + } + + println!("✅ Codegen task finished successfully."); + println!(" Generated code is available in '{}'", OUTPUT_DIR); + Ok(()) +} From 40814359e2a43dae3984220d89236a333314a078 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Thu, 17 Jul 2025 17:43:15 +0700 Subject: [PATCH 02/21] feat: multi-node client --- ...-go-unwrapped-api-spec.yaml => openapi.yml | 6991 +++++++++-------- preprocessed_openapi.yml | 4481 +++++++++++ typesense/Cargo.toml | 10 +- typesense/src/client/analytics/events.rs | 46 + typesense/src/client/analytics/mod.rs | 44 + typesense/src/client/analytics/rule.rs | 56 + typesense/src/client/analytics/rules.rs | 79 + typesense/src/client/collection/document.rs | 83 + typesense/src/client/collection/documents.rs | 311 + typesense/src/client/collection/mod.rs | 145 + .../src/client/collection/search_override.rs | 65 + .../src/client/collection/search_overrides.rs | 70 + typesense/src/client/collection/synonym.rs | 65 + typesense/src/client/collection/synonyms.rs | 67 + typesense/src/client/collections.rs | 153 +- typesense/src/client/conversations/mod.rs | 41 + typesense/src/client/conversations/model.rs | 90 + typesense/src/client/conversations/models.rs | 62 + typesense/src/client/documents.rs | 0 typesense/src/client/key.rs | 56 + typesense/src/client/keys.rs | 55 + typesense/src/client/mod.rs | 487 +- typesense/src/client/multi_search.rs | 119 + typesense/src/client/operations.rs | 52 + typesense/src/client/preset.rs | 55 + typesense/src/client/presets.rs | 59 + typesense/src/client/stemming/dictionaries.rs | 62 + typesense/src/client/stemming/dictionary.rs | 59 + typesense/src/client/stemming/mod.rs | 37 + typesense/src/client/stopword.rs | 57 + typesense/src/client/stopwords.rs | 60 + typesense/src/collection_schema.rs | 1 + typesense/src/field/mod.rs | 1 + 
typesense/tests/client/mod.rs | 289 + typesense_codegen/.openapi-generator/FILES | 18 + typesense_codegen/Cargo.toml | 19 +- typesense_codegen/README.md | 13 + .../docs/AnalyticsEventCreateSchema.md | 4 +- .../docs/AnalyticsRuleParameters.md | 4 +- ...nalyticsRuleParametersSourceEventsInner.md | 2 +- typesense_codegen/docs/AnalyticsRuleSchema.md | 2 +- .../docs/AnalyticsRuleUpsertSchema.md | 2 +- typesense_codegen/docs/ApiKey.md | 4 +- typesense_codegen/docs/ApiKeySchema.md | 4 +- typesense_codegen/docs/CollectionAlias.md | 2 +- typesense_codegen/docs/CollectionResponse.md | 8 +- typesense_codegen/docs/CollectionSchema.md | 6 +- .../docs/ConversationModelCreateSchema.md | 8 +- .../docs/ConversationModelSchema.md | 8 +- .../docs/ConversationModelUpdateSchema.md | 8 +- ...mentsDeleteDocumentsParametersParameter.md | 14 + .../docs/DeleteDocumentsParameters.md | 14 + typesense_codegen/docs/DocumentsApi.md | 232 +- ...mentsExportDocumentsParametersParameter.md | 13 + .../docs/ExportDocumentsParameters.md | 13 + .../docs/FacetCountsCountsInner.md | 2 +- typesense_codegen/docs/FacetCountsStats.md | 2 +- typesense_codegen/docs/Field.md | 22 +- .../docs/FieldEmbedModelConfig.md | 10 +- ...mentsImportDocumentsParametersParameter.md | 16 + .../docs/ImportDocumentsParameters.md | 16 + .../docs/MultiSearchCollectionParameters.md | 96 +- .../docs/MultiSearchParameters.md | 94 +- typesense_codegen/docs/MultiSearchResult.md | 2 +- .../docs/MultiSearchResultItem.md | 10 +- .../docs/MultiSearchSearchesParameter.md | 2 +- typesense_codegen/docs/NlSearchModelBase.md | 28 + .../docs/NlSearchModelCreateSchema.md | 29 + .../docs/NlSearchModelDeleteSchema.md | 11 + typesense_codegen/docs/NlSearchModelSchema.md | 29 + typesense_codegen/docs/NlSearchModelsApi.md | 161 + typesense_codegen/docs/SchemaChangeStatus.md | 2 +- typesense_codegen/docs/ScopedKeyParameters.md | 2 +- typesense_codegen/docs/SearchHighlight.md | 4 +- typesense_codegen/docs/SearchOverride.md | 14 +- typesense_codegen/docs/SearchOverrideRule.md | 6 +- .../docs/SearchOverrideSchema.md | 14 +- typesense_codegen/docs/SearchParameters.md | 104 +- typesense_codegen/docs/SearchResult.md | 10 +- typesense_codegen/docs/SearchResultHit.md | 6 +- .../docs/SearchResultRequestParams.md | 2 +- typesense_codegen/docs/SearchSynonym.md | 4 +- typesense_codegen/docs/SearchSynonymSchema.md | 4 +- .../docs/StemmingDictionaryWordsInner.md | 2 +- typesense_codegen/docs/StopwordsSetSchema.md | 2 +- .../docs/StopwordsSetUpsertSchema.md | 2 +- .../docs/UpdateDocumentsParameters.md | 11 + ...mentsUpdateDocumentsParametersParameter.md | 11 + typesense_codegen/src/apis/analytics_api.rs | 72 +- typesense_codegen/src/apis/collections_api.rs | 105 +- typesense_codegen/src/apis/configuration.rs | 4 +- .../src/apis/conversations_api.rs | 57 +- typesense_codegen/src/apis/curation_api.rs | 50 +- typesense_codegen/src/apis/debug_api.rs | 2 +- typesense_codegen/src/apis/documents_api.rs | 1097 +-- typesense_codegen/src/apis/health_api.rs | 2 +- typesense_codegen/src/apis/keys_api.rs | 41 +- typesense_codegen/src/apis/mod.rs | 10 + .../src/apis/nl_search_models_api.rs | 305 + typesense_codegen/src/apis/operations_api.rs | 21 +- typesense_codegen/src/apis/override_api.rs | 16 +- typesense_codegen/src/apis/presets_api.rs | 46 +- typesense_codegen/src/apis/stemming_api.rs | 33 +- typesense_codegen/src/apis/stopwords_api.rs | 46 +- typesense_codegen/src/apis/synonyms_api.rs | 66 +- .../models/analytics_event_create_schema.rs | 14 +- .../src/models/analytics_rule_parameters.rs | 14 +- 
...ics_rule_parameters_source_events_inner.rs | 8 +- .../src/models/analytics_rule_schema.rs | 8 +- .../models/analytics_rule_upsert_schema.rs | 8 +- typesense_codegen/src/models/api_key.rs | 14 +- .../src/models/api_key_schema.rs | 14 +- .../src/models/collection_alias.rs | 10 +- .../src/models/collection_response.rs | 34 +- .../src/models/collection_schema.rs | 26 +- .../conversation_model_create_schema.rs | 34 +- .../src/models/conversation_model_schema.rs | 34 +- .../conversation_model_update_schema.rs | 32 +- ...s_delete_documents_parameters_parameter.rs | 38 + .../src/models/delete_documents_parameters.rs | 38 + ...s_export_documents_parameters_parameter.rs | 36 + .../src/models/export_documents_parameters.rs | 36 + .../src/models/facet_counts_counts_inner.rs | 6 +- .../src/models/facet_counts_stats.rs | 6 +- typesense_codegen/src/models/field.rs | 74 +- .../src/models/field_embed_model_config.rs | 30 +- ...s_import_documents_parameters_parameter.rs | 43 + .../src/models/import_documents_parameters.rs | 43 + typesense_codegen/src/models/mod.rs | 16 + .../multi_search_collection_parameters.rs | 382 +- .../src/models/multi_search_parameters.rs | 374 +- .../src/models/multi_search_result.rs | 6 +- .../src/models/multi_search_result_item.rs | 36 +- .../models/multi_search_searches_parameter.rs | 6 +- .../src/models/nl_search_model_base.rs | 96 + .../models/nl_search_model_create_schema.rs | 100 + .../models/nl_search_model_delete_schema.rs | 28 + .../src/models/nl_search_model_schema.rs | 100 + .../src/models/schema_change_status.rs | 8 +- .../src/models/scoped_key_parameters.rs | 6 +- .../src/models/search_highlight.rs | 14 +- .../src/models/search_override.rs | 54 +- .../src/models/search_override_rule.rs | 24 +- .../src/models/search_override_schema.rs | 54 +- .../src/models/search_parameters.rs | 414 +- typesense_codegen/src/models/search_result.rs | 36 +- .../src/models/search_result_hit.rs | 24 +- .../models/search_result_request_params.rs | 8 +- .../src/models/search_synonym.rs | 16 +- .../src/models/search_synonym_schema.rs | 16 +- .../models/stemming_dictionary_words_inner.rs | 10 +- .../src/models/stopwords_set_schema.rs | 6 +- .../src/models/stopwords_set_upsert_schema.rs | 6 +- .../src/models/update_documents_parameters.rs | 27 + ...s_update_documents_parameters_parameter.rs | 27 + xtask/Cargo.toml | 4 +- xtask/src/main.rs | 28 +- xtask/src/preprocess_openapi.rs | 256 + 158 files changed, 14609 insertions(+), 5582 deletions(-) rename typesense-go-unwrapped-api-spec.yaml => openapi.yml (61%) create mode 100644 preprocessed_openapi.yml create mode 100644 typesense/src/client/analytics/events.rs create mode 100644 typesense/src/client/analytics/mod.rs create mode 100644 typesense/src/client/analytics/rule.rs create mode 100644 typesense/src/client/analytics/rules.rs create mode 100644 typesense/src/client/collection/document.rs create mode 100644 typesense/src/client/collection/documents.rs create mode 100644 typesense/src/client/collection/mod.rs create mode 100644 typesense/src/client/collection/search_override.rs create mode 100644 typesense/src/client/collection/search_overrides.rs create mode 100644 typesense/src/client/collection/synonym.rs create mode 100644 typesense/src/client/collection/synonyms.rs create mode 100644 typesense/src/client/conversations/mod.rs create mode 100644 typesense/src/client/conversations/model.rs create mode 100644 typesense/src/client/conversations/models.rs delete mode 100644 typesense/src/client/documents.rs create mode 100644 
typesense/src/client/key.rs create mode 100644 typesense/src/client/keys.rs create mode 100644 typesense/src/client/multi_search.rs create mode 100644 typesense/src/client/operations.rs create mode 100644 typesense/src/client/preset.rs create mode 100644 typesense/src/client/presets.rs create mode 100644 typesense/src/client/stemming/dictionaries.rs create mode 100644 typesense/src/client/stemming/dictionary.rs create mode 100644 typesense/src/client/stemming/mod.rs create mode 100644 typesense/src/client/stopword.rs create mode 100644 typesense/src/client/stopwords.rs create mode 100644 typesense/tests/client/mod.rs create mode 100644 typesense_codegen/docs/DeleteDocumentsDeleteDocumentsParametersParameter.md create mode 100644 typesense_codegen/docs/DeleteDocumentsParameters.md create mode 100644 typesense_codegen/docs/ExportDocumentsExportDocumentsParametersParameter.md create mode 100644 typesense_codegen/docs/ExportDocumentsParameters.md create mode 100644 typesense_codegen/docs/ImportDocumentsImportDocumentsParametersParameter.md create mode 100644 typesense_codegen/docs/ImportDocumentsParameters.md create mode 100644 typesense_codegen/docs/NlSearchModelBase.md create mode 100644 typesense_codegen/docs/NlSearchModelCreateSchema.md create mode 100644 typesense_codegen/docs/NlSearchModelDeleteSchema.md create mode 100644 typesense_codegen/docs/NlSearchModelSchema.md create mode 100644 typesense_codegen/docs/NlSearchModelsApi.md create mode 100644 typesense_codegen/docs/UpdateDocumentsParameters.md create mode 100644 typesense_codegen/docs/UpdateDocumentsUpdateDocumentsParametersParameter.md create mode 100644 typesense_codegen/src/apis/nl_search_models_api.rs create mode 100644 typesense_codegen/src/models/delete_documents_delete_documents_parameters_parameter.rs create mode 100644 typesense_codegen/src/models/delete_documents_parameters.rs create mode 100644 typesense_codegen/src/models/export_documents_export_documents_parameters_parameter.rs create mode 100644 typesense_codegen/src/models/export_documents_parameters.rs create mode 100644 typesense_codegen/src/models/import_documents_import_documents_parameters_parameter.rs create mode 100644 typesense_codegen/src/models/import_documents_parameters.rs create mode 100644 typesense_codegen/src/models/nl_search_model_base.rs create mode 100644 typesense_codegen/src/models/nl_search_model_create_schema.rs create mode 100644 typesense_codegen/src/models/nl_search_model_delete_schema.rs create mode 100644 typesense_codegen/src/models/nl_search_model_schema.rs create mode 100644 typesense_codegen/src/models/update_documents_parameters.rs create mode 100644 typesense_codegen/src/models/update_documents_update_documents_parameters_parameter.rs create mode 100644 xtask/src/preprocess_openapi.rs diff --git a/typesense-go-unwrapped-api-spec.yaml b/openapi.yml similarity index 61% rename from typesense-go-unwrapped-api-spec.yaml rename to openapi.yml index c9b3c18..3c23cba 100644 --- a/typesense-go-unwrapped-api-spec.yaml +++ b/openapi.yml @@ -1,3963 +1,4030 @@ -components: - schemas: - APIStatsResponse: - properties: - delete_latency_ms: - format: double - type: number - delete_requests_per_second: - format: double - type: number - import_latency_ms: - format: double - type: number - import_requests_per_second: - format: double - type: number - latency_ms: - type: object - x-go-type: map[string]float64 - overloaded_requests_per_second: - format: double - type: number - pending_write_batches: - format: double - type: number - requests_per_second: 
- type: object - x-go-type: map[string]float64 - search_latency_ms: - format: double - type: number - search_requests_per_second: - format: double - type: number - total_requests_per_second: - format: double - type: number - write_latency_ms: - format: double - type: number - write_requests_per_second: - format: double - type: number - type: object - AnalyticsEventCreateResponse: - properties: - ok: - type: boolean - required: - - ok - type: object - AnalyticsEventCreateSchema: - properties: - data: - type: object - name: - type: string - type: - type: string - required: - - type - - name - - data - type: object - AnalyticsRuleDeleteResponse: - properties: - name: - type: string - required: - - name - type: object - AnalyticsRuleParameters: - properties: - destination: - $ref: '#/components/schemas/AnalyticsRuleParametersDestination' - expand_query: - type: boolean - limit: - type: integer - source: - $ref: '#/components/schemas/AnalyticsRuleParametersSource' - required: - - source - - destination - type: object - AnalyticsRuleParametersDestination: - properties: - collection: - type: string - counter_field: - type: string - required: - - collection - type: object - AnalyticsRuleParametersSource: - properties: - collections: - items: - type: string - type: array - events: - items: - properties: - name: - type: string - type: - type: string - weight: - format: float - type: number - required: - - type - - weight - - name - type: object - type: array - required: +openapi: 3.0.3 +info: + title: Typesense API + description: "An open source search engine for building delightful search experiences." + version: '28.0' +externalDocs: + description: Find out more about Typsesense + url: https://typesense.org +security: + - api_key_header: [] +tags: + - name: collections + description: A collection is defined by a schema + externalDocs: + description: Find out more + url: https://typesense.org/api/#create-collection + - name: documents + description: A document is an individual record to be indexed and belongs to a collection + externalDocs: + description: Find out more + url: https://typesense.org/api/#index-document + - name: curation + description: Hand-curate search results based on conditional business rules + externalDocs: + description: Find out more + url: https://typesense.org/docs/0.23.0/api/#curation + - name: analytics + description: Typesense can aggregate search queries for both analytics purposes and for query suggestions. 
+ externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/analytics-query-suggestions.html + - name: keys + description: Manage API Keys with fine-grain access control + externalDocs: + description: Find out more + url: https://typesense.org/docs/0.23.0/api/#api-keys + - name: debug + description: Debugging information + - name: operations + description: Manage Typesense cluster + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/cluster-operations.html + - name: stopwords + description: Manage stopwords sets + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/stopwords.html + - name: presets + description: Store and reference search parameters + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/search.html#presets + - name: conversations + description: Conversational Search (RAG) + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/conversational-search-rag.html + - name: synonyms + description: Manage synonyms + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/synonyms.html + - name: stemming + description: Manage stemming dictionaries + externalDocs: + description: Find out more + url: https://typesense.org/docs/28.0/api/stemming.html + - name: nl_search_models + description: Manage NL search models + externalDocs: + description: Find out more + url: https://typesense.org/docs/29.0/api/natural-language-search.html +paths: + /collections: + get: + tags: - collections - type: object - AnalyticsRuleSchema: - allOf: - - $ref: '#/components/schemas/AnalyticsRuleUpsertSchema' - - properties: - name: - type: string - required: - - name - type: object - AnalyticsRuleUpsertSchema: - properties: - params: - $ref: '#/components/schemas/AnalyticsRuleParameters' - type: - enum: - - popular_queries - - nohits_queries - - counter - type: string - required: - - type - - params - type: object - AnalyticsRulesRetrieveSchema: - properties: - rules: - items: - $ref: '#/components/schemas/AnalyticsRuleSchema' - type: array - x-go-type: '[]*AnalyticsRuleSchema' - type: object - ApiKey: - allOf: - - $ref: '#/components/schemas/ApiKeySchema' - - properties: - id: - format: int64 - readOnly: true - type: integer - value_prefix: - readOnly: true - type: string - type: object - ApiKeyDeleteResponse: - properties: - id: - description: The id of the API key that was deleted - format: int64 - type: integer - required: - - id - type: object - ApiKeySchema: - properties: - actions: - items: - type: string - type: array - collections: - items: - type: string - type: array - description: - type: string - expires_at: - format: int64 - type: integer - value: - type: string - required: - - actions + summary: List all collections + description: + Returns a summary of all your collections. The collections are + returned sorted by creation date, with the most recent collections appearing + first. 
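For quick reference while reviewing this endpoint, a minimal sketch of listing collections over plain HTTP with reqwest rather than through the generated typesense_codegen bindings; the base URL, the API key value, and the use of the X-TYPESENSE-API-KEY header are placeholders/assumptions for illustration.

```rust
// Minimal sketch of calling GET /collections directly over HTTP.
// Assumed deps: reqwest = { version = "0.12", features = ["blocking", "json"] }, serde_json = "1".
use serde_json::Value;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let base_url = "http://localhost:8108"; // placeholder server address
    let api_key = "xyz";                    // placeholder admin API key

    let collections: Value = reqwest::blocking::Client::new()
        .get(format!("{base_url}/collections"))
        .header("X-TYPESENSE-API-KEY", api_key) // conventional Typesense auth header
        .send()?
        .error_for_status()?
        .json()?;

    // The response is an array of CollectionResponse objects, newest first.
    println!("{}", serde_json::to_string_pretty(&collections)?);
    Ok(())
}
```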
+ operationId: getCollections + responses: + '200': + description: List of all collections + content: + application/json: + schema: + type: array + x-go-type: "[]*CollectionResponse" + items: + $ref: "#/components/schemas/CollectionResponse" + post: + tags: - collections - - description - type: object - ApiKeysResponse: - properties: - keys: - items: - $ref: '#/components/schemas/ApiKey' - type: array - x-go-type: '[]*ApiKey' - required: - - keys - type: object - ApiResponse: - properties: - message: - type: string - required: - - message - type: object - CollectionAlias: - properties: - collection_name: - description: Name of the collection the alias mapped to - type: string - name: - description: Name of the collection alias - readOnly: true - type: string - required: - - collection_name - - name - type: object - CollectionAliasSchema: - properties: - collection_name: - description: Name of the collection you wish to map the alias to - type: string - required: - - collection_name - type: object - CollectionAliasesResponse: - properties: - aliases: - items: - $ref: '#/components/schemas/CollectionAlias' - type: array - x-go-type: '[]*CollectionAlias' - required: - - aliases - type: object - CollectionResponse: - allOf: - - $ref: '#/components/schemas/CollectionSchema' - - properties: - created_at: - description: Timestamp of when the collection was created (Unix epoch in seconds) - format: int64 - readOnly: true - type: integer - num_documents: - description: Number of documents in the collection - format: int64 - readOnly: true - type: integer - required: - - num_documents - - created_at - type: object - CollectionSchema: - properties: - default_sorting_field: - default: "" - description: The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. - example: num_employees - type: string - enable_nested_fields: - default: false - description: Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. - example: true - type: boolean - fields: - description: A list of fields for querying, filtering and faceting - example: - - facet: false - name: num_employees - type: int32 - - facet: false - name: company_name - type: string - - facet: true - name: country - type: string - items: - $ref: '#/components/schemas/Field' - type: array - name: - description: Name of the collection - example: companies - type: string - symbols_to_index: - default: [] - description: | - List of symbols or special characters to be indexed. - items: - maxLength: 1 - minLength: 1 + summary: Create a new collection + description: + When a collection is created, we give it a name and describe the + fields that will be indexed from the documents added to the collection. 
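A hedged sketch of what a createCollection call could look like over plain HTTP, reusing the `companies` example fields from CollectionSchema; the server address and API key are placeholders, not part of the spec.

```rust
// Minimal sketch of POST /collections, reusing the `companies` example schema.
// Assumed deps: reqwest (blocking + json features) and serde_json; URL and key are placeholders.
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let schema = json!({
        "name": "companies",
        "fields": [
            { "name": "company_name",  "type": "string" },
            { "name": "num_employees", "type": "int32"  },
            { "name": "country",       "type": "string", "facet": true }
        ],
        "default_sorting_field": "num_employees"
    });

    let resp = reqwest::blocking::Client::new()
        .post("http://localhost:8108/collections") // placeholder server address
        .header("X-TYPESENSE-API-KEY", "xyz")      // placeholder admin API key
        .json(&schema)
        .send()?;

    // 201 returns the CollectionResponse; 400/409 return an ApiResponse with a message.
    println!("{}: {}", resp.status(), resp.text()?);
    Ok(())
}
```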
+ operationId: createCollection + requestBody: + description: The collection object to be created + content: + application/json: + schema: + $ref: "#/components/schemas/CollectionSchema" + required: true + responses: + '201': + description: Collection successfully created + content: + application/json: + schema: + $ref: "#/components/schemas/CollectionResponse" + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + '409': + description: Collection already exists + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + /collections/{collectionName}: + get: + tags: + - collections + summary: Retrieve a single collection + description: Retrieve the details of a collection, given its name. + operationId: getCollection + parameters: + - name: collectionName + in: path + description: The name of the collection to retrieve + required: true + schema: type: string - type: array - token_separators: - default: [] - description: | - List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. - items: - maxLength: 1 - minLength: 1 + responses: + '200': + description: Collection fetched + content: + application/json: + schema: + $ref: "#/components/schemas/CollectionResponse" + '404': + description: Collection not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + patch: + tags: + - collections + summary: Update a collection + description: + Update a collection's schema to modify the fields and their types. + operationId: updateCollection + parameters: + - name: collectionName + in: path + description: The name of the collection to update + required: true + schema: type: string - type: array - voice_query_model: - $ref: '#/components/schemas/VoiceQueryModelCollectionConfig' - required: - - name - - fields - type: object - CollectionUpdateSchema: - properties: - fields: - description: A list of fields for querying, filtering and faceting - example: - - facet: false - name: company_name - type: string - - facet: false - name: num_employees - type: int32 - - facet: true - name: country - type: string - items: - $ref: '#/components/schemas/Field' - type: array - required: - - fields - type: object - ConversationModelCreateSchema: - allOf: - - $ref: '#/components/schemas/ConversationModelUpdateSchema' - - properties: - history_collection: - description: Typesense collection that stores the historical conversations - type: string - max_bytes: - description: | - The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. - type: integer - model_name: - description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM - type: string - required: - - model_name - - max_bytes - - history_collection - type: object - required: - - model_name - - max_bytes - ConversationModelSchema: - allOf: - - $ref: '#/components/schemas/ConversationModelCreateSchema' - - properties: - id: - description: An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. 
- type: string - required: - - id - type: object - ConversationModelUpdateSchema: - properties: - account_id: - description: LLM service's account ID (only applicable for Cloudflare) - type: string - api_key: - description: The LLM service's API Key - type: string - history_collection: - description: Typesense collection that stores the historical conversations - type: string - id: - description: An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. - type: string - max_bytes: - description: | - The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. - type: integer - model_name: - description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM - type: string - system_prompt: - description: The system prompt that contains special instructions to the LLM - type: string - ttl: - description: | - Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) - type: integer - vllm_url: - description: URL of vLLM service - type: string - type: object - DirtyValues: - enum: - - coerce_or_reject - - coerce_or_drop - - drop - - reject - type: string - DocumentIndexParameters: - properties: - dirty_values: - $ref: '#/components/schemas/DirtyValues' - type: object - DropTokensMode: - description: | - Dictates the direction in which the words in the query must be dropped when the original words in the query do not appear in any document. Values: right_to_left (default), left_to_right, both_sides:3 A note on both_sides:3 - for queries upto 3 tokens (words) in length, this mode will drop tokens from both sides and exhaustively rank all matching results. If query length is greater than 3 words, Typesense will just fallback to default behavior of right_to_left - enum: - - right_to_left - - left_to_right - - both_sides:3 - type: string - ErrorResponse: - properties: - message: - type: string - type: object - FacetCounts: - properties: - counts: - items: - properties: - count: - type: integer - highlighted: - type: string - parent: - type: object - value: - type: string - type: object - type: array - field_name: - type: string - stats: - properties: - avg: - format: double - type: number - max: - format: double - type: number - min: - format: double - type: number - sum: - format: double - type: number - total_values: - type: integer - type: object - type: object - Field: - properties: - drop: - example: true - type: boolean - embed: - properties: - from: - items: - type: string - type: array - model_config: - properties: - access_token: - type: string - api_key: - type: string - client_id: - type: string - client_secret: - type: string - indexing_prefix: - type: string - model_name: - type: string - project_id: - type: string - query_prefix: - type: string - refresh_token: - type: string - url: - type: string - required: - - model_name + requestBody: + description: The document object with fields to be updated + content: + application/json: + schema: + $ref: "#/components/schemas/CollectionUpdateSchema" + required: true + responses: + '200': + description: The updated partial collection schema + content: + application/json: + schema: + $ref: "#/components/schemas/CollectionUpdateSchema" + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + '404': + description: The collection was not found + content: + 
application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + delete: + tags: + - collections + summary: Delete a collection + description: + Permanently drops a collection. This action cannot be undone. For + large collections, this might have an impact on read latencies. + operationId: deleteCollection + parameters: + - name: collectionName + in: path + description: The name of the collection to delete + required: true + schema: + type: string + responses: + '200': + description: Collection deleted + content: + application/json: + schema: + $ref: "#/components/schemas/CollectionResponse" + '404': + description: Collection not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + /collections/{collectionName}/documents: + post: + tags: + - documents + summary: Index a document + description: + A document to be indexed in a given collection must conform to + the schema of the collection. + operationId: indexDocument + parameters: + - name: collectionName + in: path + description: The name of the collection to add the document to + required: true + schema: + type: string + - name: action + in: query + description: Additional action to perform + schema: + type: string + example: upsert + $ref: "#/components/schemas/IndexAction" + - name: dirty_values + in: query + description: Dealing with Dirty Data + schema: + $ref: "#/components/schemas/DirtyValues" + requestBody: + description: The document object to be indexed + content: + application/json: + schema: type: object - required: - - from - - model_config - type: object - facet: - example: false - type: boolean - index: - default: true - example: true - type: boolean - infix: - default: false - example: true - type: boolean - locale: - example: el - type: string - name: - example: company_name - type: string - num_dim: - example: 256 - type: integer - optional: - example: true - type: boolean - range_index: - description: | - Enables an index optimized for range filtering on numerical fields (e.g. rating:>3.5). Default: false. - type: boolean - reference: - description: | - Name of a field in another collection that should be linked to this collection so that it can be joined during query. - type: string - sort: - example: true - type: boolean - stem: - description: | - Values are stemmed before indexing in-memory. Default: false. - type: boolean - stem_dictionary: - description: Name of the stemming dictionary to use for this field - example: irregular-plurals - type: string - store: - description: | - When set to false, the field value will not be stored on disk. Default: true. - type: boolean - symbols_to_index: - default: [] - description: | - List of symbols or special characters to be indexed. - items: - maxLength: 1 - minLength: 1 + description: Can be any key-value pair + x-go-type: "interface{}" + required: true + responses: + '201': + description: Document successfully created/indexed + content: + application/json: + schema: + type: object + description: Can be any key-value pair + '404': + description: Collection not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + patch: + tags: + - documents + summary: Update documents with conditional query + description: + The filter_by query parameter is used to filter to specify a condition against which the documents are matched. + The request body contains the fields that should be updated for any documents that match the filter condition. 
+ This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later. + operationId: updateDocuments + parameters: + - name: collectionName + in: path + description: The name of the collection to update documents in + required: true + schema: type: string - type: array - token_separators: - default: [] - description: | - List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. - items: - maxLength: 1 - minLength: 1 + - name: updateDocumentsParameters + in: query + schema: + type: object + properties: + filter_by: + type: string + example: "num_employees:>100 && country: [USA, UK]" + responses: + '200': + description: + The response contains a single field, `num_updated`, indicating the number of documents affected. + content: + application/json: + schema: + type: object + required: + - num_updated + properties: + num_updated: + type: integer + description: The number of documents that have been updated + example: 1 + '400': + description: 'Bad request, see error message for details' + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + '404': + description: The collection was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + requestBody: + description: The document fields to be updated + content: + application/json: + schema: + type: object + description: Can be any key-value pair + x-go-type: "interface{}" + required: true + delete: + tags: + - documents + summary: Delete a bunch of documents + description: + Delete a bunch of documents that match a specific filter condition. + Use the `batch_size` parameter to control the number of documents that + should deleted at a time. A larger value will speed up deletions, but will + impact performance of other operations running on the server. + operationId: deleteDocuments + parameters: + - name: collectionName + in: path + description: The name of the collection to delete documents from + required: true + schema: type: string - type: array - type: - example: string - type: string - vec_dist: - description: | - The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. - type: string - required: - - name - - type - type: object - HealthStatus: - properties: - ok: - type: boolean - required: - - ok - type: object - IndexAction: - enum: - - create - - update - - upsert - - emplace - type: string - MultiSearchCollectionParameters: - allOf: - - $ref: '#/components/schemas/MultiSearchParameters' - - properties: - collection: - description: | - The collection to search in. - type: string - rerank_hybrid_matches: - default: false - description: | - When true, computes both text match and vector distance scores for all matches in hybrid search. Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. - type: boolean - x-typesense-api-key: - description: A separate search API key for each search within a multi_search request - type: string - type: object - MultiSearchParameters: - description: | - Parameters for the multi search API. - properties: - cache_ttl: - description: | - The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - type: integer - conversation: - description: | - Enable conversational search. 
- type: boolean - conversation_id: - description: | - The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. - type: string - conversation_model_id: - description: | - The Id of Conversation Model to be used. - type: string - drop_tokens_mode: - $ref: '#/components/schemas/DropTokensMode' - drop_tokens_threshold: - description: | - If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - type: integer - enable_overrides: - default: false - description: | - If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - type: boolean - enable_synonyms: - description: | - If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true - type: boolean - enable_typos_for_alpha_numerical_tokens: - description: | - Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. - type: boolean - enable_typos_for_numerical_tokens: - default: true - description: | - Make Typesense disable typos for numerical tokens. - type: boolean - exclude_fields: - description: List of fields from the document to exclude in the search result - type: string - exhaustive_search: - description: | - Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - type: boolean - facet_by: - description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - type: string - facet_query: - description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". - type: string - facet_return_parent: - description: | - Comma separated string of nested facet fields whose parent object should be returned in facet response. - type: string - facet_strategy: - description: | - Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). - type: string - filter_by: - description: Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. - example: 'num_employees:>100 && country: [USA, UK]' - type: string - filter_curated_hits: - description: | - Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false - type: boolean - group_by: - description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. - type: string - group_limit: - description: | - Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. 
Default: 3 - type: integer - group_missing_values: - description: | - Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true - type: boolean - hidden_hits: - description: | - A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. - You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string - highlight_affix_num_tokens: - description: | - The number of tokens that should surround the highlighted text on each side. Default: 4 - type: integer - highlight_end_tag: - description: | - The end tag used for the highlighted snippets. Default: `` - type: string - highlight_fields: - description: | - A list of custom fields that must be highlighted even if you don't query for them - type: string - highlight_full_fields: - description: List of fields which should be highlighted fully without snippeting - type: string - highlight_start_tag: - description: | - The start tag used for the highlighted snippets. Default: `` - type: string - include_fields: - description: List of fields from the document to include in the search result - type: string - infix: - description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results - type: string - limit: - description: | - Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - type: integer - max_extra_prefix: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - type: integer - max_extra_suffix: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - type: integer - max_facet_values: - description: Maximum number of facet values to be returned. - type: integer - min_len_1typo: - description: | - Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer - min_len_2typo: - description: | - Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer - num_typos: - description: | - The number of typographical errors (1 or 2) that would be tolerated. 
Default: 2 - type: string - offset: - description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - type: integer - override_tags: - description: Comma separated list of tags to trigger the curations rules that match the tags. - type: string - page: - description: Results from this specific page number would be fetched. - type: integer - per_page: - description: 'Number of results to fetch per page. Default: 10' - type: integer - pinned_hits: - description: | - A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. - You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string - pre_segmented_query: - default: false - description: | - You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. - Set this parameter to true to do the same - type: boolean - prefix: - description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - type: string - preset: - description: | - Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. - type: string - prioritize_exact_match: - default: true - description: | - Set this parameter to true to ensure that an exact match is ranked above the others - type: boolean - prioritize_num_matching_fields: - default: true - description: | - Make Typesense prioritize documents where the query words appear in more number of fields. - type: boolean - prioritize_token_position: - default: false - description: | - Make Typesense prioritize documents where the query words appear earlier in the text. - type: boolean - q: - description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - type: string - query_by: - description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - type: string - query_by_weights: - description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. - type: string - remote_embedding_num_tries: - description: | - Number of times to retry fetching remote embeddings. - type: integer - remote_embedding_timeout_ms: - description: | - Timeout (in milliseconds) for fetching remote embeddings. - type: integer - search_cutoff_ms: - description: | - Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. - type: integer - snippet_threshold: - description: | - Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. 
Default: 30 - type: integer - sort_by: - description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` - type: string - stopwords: - description: | - Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. - type: string - synonym_num_typos: - description: | - Allow synonym resolution on typo-corrected words in the query. Default: 0 - type: integer - synonym_prefix: - description: | - Allow synonym resolution on word prefixes in the query. Default: false - type: boolean - text_match_type: - description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. - type: string - typo_tokens_threshold: - description: | - If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - type: integer - use_cache: - description: | - Enable server side caching of search query results. By default, caching is disabled. - type: boolean - vector_query: - description: | - Vector query expression for fetching documents "closest" to a given query/document vector. - type: string - voice_query: - description: | - The base64 encoded audio file in 16 khz 16-bit WAV format. - type: string - type: object - MultiSearchResult: - properties: - conversation: - $ref: '#/components/schemas/SearchResultConversation' - results: - items: - $ref: '#/components/schemas/MultiSearchResultItem' - type: array - required: - - results - type: object - MultiSearchResultItem: - allOf: - - $ref: '#/components/schemas/SearchResult' - - properties: - code: - description: HTTP error code - format: int64 - type: integer - error: - description: Error description - type: string - type: object - MultiSearchSearchesParameter: - properties: - searches: - items: - $ref: '#/components/schemas/MultiSearchCollectionParameters' - type: array - union: - description: When true, merges the search results from each search query into a single ordered set of hits. 
- type: boolean - required: - - searches - type: object - PresetDeleteSchema: - properties: - name: - type: string - required: - - name - type: object - PresetSchema: - allOf: - - $ref: '#/components/schemas/PresetUpsertSchema' - - properties: - name: - type: string - required: - - name - type: object - PresetUpsertSchema: - properties: - value: - oneOf: - - $ref: '#/components/schemas/SearchParameters' - - $ref: '#/components/schemas/MultiSearchSearchesParameter' - required: - - value - PresetsRetrieveSchema: - properties: - presets: - items: - $ref: '#/components/schemas/PresetSchema' - type: array - x-go-type: '[]*PresetSchema' - required: - - presets - type: object - SchemaChangeStatus: - properties: - altered_docs: - description: Number of documents that have been altered - type: integer - collection: - description: Name of the collection being modified - type: string - validated_docs: - description: Number of documents that have been validated - type: integer - type: object - ScopedKeyParameters: - properties: - expires_at: - format: int64 - type: integer - filter_by: - type: string - type: object - SearchGroupedHit: - properties: - found: - type: integer - group_key: - items: {} - type: array - hits: - description: The documents that matched the search query - items: - $ref: '#/components/schemas/SearchResultHit' - type: array - required: - - group_key - - hits - type: object - SearchHighlight: - properties: - field: - example: company_name - type: string - indices: - description: The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field - example: 1 - items: - type: integer - type: array - matched_tokens: - items: - type: object - x-go-type: interface{} - type: array - snippet: - description: Present only for (non-array) string fields - example: Stark Industries - type: string - snippets: - description: Present only for (array) string[] fields - example: - - Stark Industries - - Stark Corp - items: - type: string - type: array - value: - description: Full field value with highlighting, present only for (non-array) string fields - example: Stark Industries is a major supplier of space equipment. - type: string - values: - description: Full field value with highlighting, present only for (array) string[] fields - example: - - Stark Industries - - Stark Corp - items: - type: string - type: array - type: object - SearchOverride: - allOf: - - $ref: '#/components/schemas/SearchOverrideSchema' - - properties: - id: - readOnly: true - type: string - required: - - id - type: object - SearchOverrideDeleteResponse: - properties: - id: - description: The id of the override that was deleted - type: string - required: - - id - type: object - SearchOverrideExclude: - properties: - id: - description: document id that should be excluded from the search results. - type: string - required: - - id - type: object - SearchOverrideInclude: - properties: - id: - description: document id that should be included - type: string - position: - description: position number where document should be included in the search results - type: integer - required: - - id - - position - type: object - SearchOverrideRule: - properties: - filter_by: - description: | - Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). 
- type: string - match: - description: | - Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. - enum: - - exact - - contains - type: string - query: - description: Indicates what search queries should be overridden - type: string - tags: - description: List of tag values to associate with this override rule. - items: - type: string - type: array - type: object - SearchOverrideSchema: - properties: - effective_from_ts: - description: | - A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. - type: integer - effective_to_ts: - description: | - A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. - type: integer - excludes: - description: List of document `id`s that should be excluded from the search results. - items: - $ref: '#/components/schemas/SearchOverrideExclude' - type: array - filter_by: - description: | - A filter by clause that is applied to any search query that matches the override rule. - type: string - filter_curated_hits: - description: | - When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. - type: boolean - includes: - description: List of document `id`s that should be included in the search results with their corresponding `position`s. - items: - $ref: '#/components/schemas/SearchOverrideInclude' - type: array - metadata: - description: | - Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. - type: object - remove_matched_tokens: - description: | - Indicates whether search query tokens that exist in the override's rule should be removed from the search query. - type: boolean - replace_query: - description: | - Replaces the current search query with this value, when the search query matches the override rule. - type: string - rule: - $ref: '#/components/schemas/SearchOverrideRule' - sort_by: - description: | - A sort by clause that is applied to any search query that matches the override rule. - type: string - stop_processing: - description: | - When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. - type: boolean - required: - - rule - type: object - SearchOverridesResponse: - properties: - overrides: - items: - $ref: '#/components/schemas/SearchOverride' - type: array - x-go-type: '[]*SearchOverride' - required: - - overrides - type: object - SearchParameters: - properties: - cache_ttl: - description: | - The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - type: integer - conversation: - description: | - Enable conversational search. - type: boolean - conversation_id: - description: | - The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. 
- type: string - conversation_model_id: - description: | - The Id of Conversation Model to be used. - type: string - drop_tokens_mode: - $ref: '#/components/schemas/DropTokensMode' - drop_tokens_threshold: - description: | - If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - type: integer - enable_highlight_v1: - default: true - description: | - Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true - type: boolean - enable_overrides: - default: false - description: | - If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - type: boolean - enable_synonyms: - description: | - If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true - type: boolean - enable_typos_for_alpha_numerical_tokens: - description: | - Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. - type: boolean - enable_typos_for_numerical_tokens: - default: true - description: | - Make Typesense disable typos for numerical tokens. - type: boolean - exclude_fields: - description: List of fields from the document to exclude in the search result - type: string - exhaustive_search: - description: | - Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - type: boolean - facet_by: - description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - type: string - facet_query: - description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". - type: string - facet_return_parent: - description: | - Comma separated string of nested facet fields whose parent object should be returned in facet response. - type: string - facet_strategy: - description: | - Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). - type: string - filter_by: - description: Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. - example: 'num_employees:>100 && country: [USA, UK]' - type: string - filter_curated_hits: - description: | - Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false - type: boolean - group_by: - description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. - type: string - group_limit: - description: | - Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. 
Default: 3 - type: integer - group_missing_values: - description: | - Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true - type: boolean - hidden_hits: - description: | - A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. - You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string - highlight_affix_num_tokens: - description: | - The number of tokens that should surround the highlighted text on each side. Default: 4 - type: integer - highlight_end_tag: - description: | - The end tag used for the highlighted snippets. Default: `` - type: string - highlight_fields: - description: | - A list of custom fields that must be highlighted even if you don't query for them - type: string - highlight_full_fields: - description: List of fields which should be highlighted fully without snippeting - type: string - highlight_start_tag: - description: | - The start tag used for the highlighted snippets. Default: `` - type: string - include_fields: - description: List of fields from the document to include in the search result - type: string - infix: - description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results - type: string - limit: - description: | - Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - type: integer - max_candidates: - description: | - Control the number of words that Typesense considers for typo and prefix searching. - type: integer - max_extra_prefix: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - type: integer - max_extra_suffix: - description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. - type: integer - max_facet_values: - description: Maximum number of facet values to be returned. - type: integer - max_filter_by_candidates: - description: Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. - type: integer - min_len_1typo: - description: | - Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. 
- type: integer - min_len_2typo: - description: | - Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - type: integer - num_typos: - description: | - The number of typographical errors (1 or 2) that would be tolerated. Default: 2 - type: string - offset: - description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - type: integer - override_tags: - description: Comma separated list of tags to trigger the curations rules that match the tags. - type: string - page: - description: Results from this specific page number would be fetched. - type: integer - per_page: - description: 'Number of results to fetch per page. Default: 10' - type: integer - pinned_hits: - description: | - A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. - You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - type: string - pre_segmented_query: - description: | - You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. - Set this parameter to true to do the same - type: boolean - prefix: - description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - type: string - preset: - description: | - Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. - type: string - prioritize_exact_match: - default: true - description: | - Set this parameter to true to ensure that an exact match is ranked above the others - type: boolean - prioritize_num_matching_fields: - default: true - description: | - Make Typesense prioritize documents where the query words appear in more number of fields. - type: boolean - prioritize_token_position: - default: false - description: | - Make Typesense prioritize documents where the query words appear earlier in the text. - type: boolean - q: - description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - type: string - query_by: - description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - type: string - query_by_weights: - description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. - type: string - remote_embedding_num_tries: - description: | - Number of times to retry fetching remote embeddings. - type: integer - remote_embedding_timeout_ms: - description: | - Timeout (in milliseconds) for fetching remote embeddings. - type: integer - search_cutoff_ms: - description: | - Typesense will attempt to return results early if the cutoff time has elapsed. 
This is not a strict guarantee and facet computation is not bound by this parameter. - type: integer - snippet_threshold: - description: | - Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - type: integer - sort_by: - description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` - example: num_employees:desc - type: string - split_join_tokens: - description: | - Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. - type: string - stopwords: - description: | - Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. - type: string - synonym_num_typos: - description: | - Allow synonym resolution on typo-corrected words in the query. Default: 0 - type: integer - synonym_prefix: - description: | - Allow synonym resolution on word prefixes in the query. Default: false - type: boolean - text_match_type: - description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. - type: string - typo_tokens_threshold: - description: | - If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - type: integer - use_cache: - description: | - Enable server side caching of search query results. By default, caching is disabled. - type: boolean - vector_query: - description: | - Vector query expression for fetching documents "closest" to a given query/document vector. - type: string - voice_query: - description: | - The base64 encoded audio file in 16 khz 16-bit WAV format. 
- type: string - type: object - SearchResult: - properties: - conversation: - $ref: '#/components/schemas/SearchResultConversation' - facet_counts: - items: - $ref: '#/components/schemas/FacetCounts' - type: array - found: - description: The number of documents found - type: integer - found_docs: - type: integer - grouped_hits: - items: - $ref: '#/components/schemas/SearchGroupedHit' - type: array - hits: - description: The documents that matched the search query - items: - $ref: '#/components/schemas/SearchResultHit' - type: array - out_of: - description: The total number of documents in the collection - type: integer - page: - description: The search result page number - type: integer - request_params: - properties: - collection_name: - type: string - per_page: - type: integer - q: - type: string - voice_query: - properties: - transcribed_query: - type: string - type: object - required: - - collection_name - - q - - per_page - type: object - search_cutoff: - description: Whether the search was cut off - type: boolean - search_time_ms: - description: The number of milliseconds the search took - type: integer - type: object - SearchResultConversation: - properties: - answer: - type: string - conversation_history: - items: + - name: deleteDocumentsParameters + in: query + schema: type: object - type: array - conversation_id: - type: string - query: - type: string - required: - - answer - - conversation_history - - conversation_id - - query - type: object - SearchResultHit: - example: - document: - company_name: Stark Industries - country: USA - id: "124" - num_employees: 5215 - highlights: - company_name: - field: company_name - snippet: Stark Industries - text_match: 1234556 - properties: - document: - description: Can be any key-value pair - type: object - geo_distance_meters: - additionalProperties: - type: integer - description: Can be any key-value pair - type: object - highlight: - additionalProperties: true - description: Highlighted version of the matching document - type: object - highlights: - description: (Deprecated) Contains highlighted portions of the search fields - items: - $ref: '#/components/schemas/SearchHighlight' - type: array - text_match: - format: int64 - type: integer - text_match_info: - properties: - best_field_score: - type: string - best_field_weight: - type: integer - fields_matched: - type: integer - num_tokens_dropped: - format: int64 - type: integer - x-go-type: uint64 - score: - type: string - tokens_matched: - type: integer - typo_prefix_score: - type: integer - type: object - vector_distance: - description: Distance between the query vector and matching document's vector value - format: float - type: number - type: object - SearchSynonym: - allOf: - - $ref: '#/components/schemas/SearchSynonymSchema' - - properties: - id: - readOnly: true - type: string - required: - - id - type: object - SearchSynonymDeleteResponse: - properties: - id: - description: The id of the synonym that was deleted - type: string - required: - - id - type: object - SearchSynonymSchema: - properties: - locale: - description: Locale for the synonym, leave blank to use the standard tokenizer. - type: string - root: - description: For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. - type: string - symbols_to_index: - description: By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. 
- items: + required: + - filter_by + properties: + filter_by: + type: string + example: "num_employees:>100 && country: [USA, UK]" + batch_size: + description: + Batch size parameter controls the number of documents that should be deleted + at a time. A larger value will speed up deletions, but will impact performance + of other operations running on the server. + type: integer + ignore_not_found: + type: boolean + truncate: + description: When true, removes all documents from the collection while preserving the collection and its schema. + type: boolean + responses: + '200': + description: Documents successfully deleted + content: + application/json: + schema: + type: object + required: + - num_deleted + properties: + num_deleted: + type: integer + '404': + description: Collection not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + /collections/{collectionName}/documents/search: + get: + tags: + - documents + summary: Search for documents in a collection + description: Search for documents in a collection that match the search criteria. + operationId: searchCollection + parameters: + - name: collectionName + in: path + description: The name of the collection to search for the document under + required: true + schema: + type: string + - name: searchParameters + required: true + in: query + schema: + $ref: "#/components/schemas/SearchParameters" + responses: + '200': + description: Search results + content: + application/json: + schema: + $ref: "#/components/schemas/SearchResult" + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + '404': + description: The collection or field was not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + /collections/{collectionName}/overrides: + get: + tags: + - documents + - curation + summary: List all collection overrides + operationId: getSearchOverrides + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + responses: + '200': + description: List of all search overrides + content: + application/json: + schema: + $ref: "#/components/schemas/SearchOverridesResponse" + /collections/{collectionName}/overrides/{overrideId}: + get: + tags: + - documents + - override + summary: Retrieve a single search override + description: Retrieve the details of a search override, given its id. + operationId: getSearchOverride + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: overrideId + in: path + description: The id of the search override + required: true + schema: type: string - type: array - synonyms: - description: Array of words that should be considered as synonyms. - items: + responses: + '200': + description: Search override fetched + content: + application/json: + schema: + $ref: "#/components/schemas/SearchOverride" + put: + tags: + - documents + - curation + summary: Create or update an override to promote certain documents over others + description: + Create or update an override to promote certain documents over others. + Using overrides, you can include or exclude specific documents for a given query. 
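As a quick orientation for the `searchCollection` operation defined above, here is a minimal sketch that issues the same GET request over plain HTTP with `reqwest` rather than through the generated client. The server URL `http://localhost:8108`, the `xyz` API key, and the `companies` collection are placeholders, and the snippet assumes `reqwest` with its `json` feature plus `tokio`; the parameter names themselves come from the `SearchParameters` schema.

```rust
use serde_json::Value;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // GET /collections/{collectionName}/documents/search with SearchParameters in the query string.
    let body: Value = client
        .get("http://localhost:8108/collections/companies/documents/search")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .query(&[
            ("q", "stark"),
            ("query_by", "company_name"),
            ("filter_by", "num_employees:>100"),
            ("sort_by", "num_employees:desc"),
        ])
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    // SearchResult exposes `found`, `hits`, `page`, etc.
    println!("found: {}", body["found"]);
    Ok(())
}
```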
+ operationId: upsertSearchOverride + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: type: string - type: array - required: + - name: overrideId + in: path + description: The ID of the search override to create/update + required: true + schema: + type: string + requestBody: + description: The search override object to be created/updated + content: + application/json: + schema: + $ref: "#/components/schemas/SearchOverrideSchema" + required: true + responses: + '200': + description: Created/updated search override + content: + application/json: + schema: + $ref: "#/components/schemas/SearchOverride" + '404': + description: Search override not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + delete: + tags: + - documents + - curation + summary: Delete an override associated with a collection + operationId: deleteSearchOverride + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: overrideId + in: path + description: The ID of the search override to delete + required: true + schema: + type: string + responses: + '200': + description: The ID of the deleted search override + content: + application/json: + schema: + $ref: "#/components/schemas/SearchOverrideDeleteResponse" + '404': + description: Search override not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + /collections/{collectionName}/synonyms: + get: + tags: - synonyms - type: object - SearchSynonymsResponse: - properties: - synonyms: - items: - $ref: '#/components/schemas/SearchSynonym' - type: array - x-go-type: '[]*SearchSynonym' - required: + summary: List all collection synonyms + operationId: getSearchSynonyms + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + responses: + '200': + description: List of all search synonyms + content: + application/json: + schema: + $ref: "#/components/schemas/SearchSynonymsResponse" + '404': + description: Search synonyms was not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + /collections/{collectionName}/synonyms/{synonymId}: + get: + tags: - synonyms - type: object - SnapshotParameters: - properties: - snapshot_path: - type: string - type: object - StemmingDictionary: - properties: - id: - description: Unique identifier for the dictionary - example: irregular-plurals - type: string - words: - description: List of word mappings in the dictionary - items: - properties: - root: - description: The root form of the word - example: person - type: string - word: - description: The word form to be stemmed - example: people - type: string - required: - - word - - root - type: object - type: array - required: - - id - - words - type: object - StopwordsSetRetrieveSchema: - example: | - {"stopwords": {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}} - properties: - stopwords: - $ref: '#/components/schemas/StopwordsSetSchema' - required: - - stopwords - type: object - StopwordsSetSchema: - example: | - {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"} - properties: - id: - type: string - locale: - type: string - stopwords: - items: + summary: Retrieve a single search synonym + description: Retrieve the details of a search synonym, given its id. 
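The `upsertSearchOverride` operation above takes a `SearchOverrideSchema` body that is defined elsewhere in the spec; the `rule`/`includes`/`excludes` payload below follows the usual Typesense curation shape and should be read as an illustrative assumption, with the same placeholder server, key, and IDs as the earlier sketch.

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // PUT /collections/{collectionName}/overrides/{overrideId}
    let override_body = json!({
        "rule": { "query": "apple", "match": "exact" },
        "includes": [{ "id": "422", "position": 1 }],
        "excludes": [{ "id": "287" }]
    });
    let created: serde_json::Value = client
        .put("http://localhost:8108/collections/companies/overrides/customize-apple")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&override_body)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("upserted override: {}", created["id"]);
    Ok(())
}
```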
+ operationId: getSearchSynonym + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: type: string - type: array - required: - - id - - stopwords - type: object - StopwordsSetUpsertSchema: - example: | - {"stopwords": ["Germany", "France", "Italy"], "locale": "en"} - properties: - locale: - type: string - stopwords: - items: + - name: synonymId + in: path + description: The id of the search synonym + required: true + schema: type: string - type: array - required: - - stopwords - type: object - StopwordsSetsRetrieveAllSchema: - example: | - {"stopwords": [{"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}]} - properties: - stopwords: - items: - $ref: '#/components/schemas/StopwordsSetSchema' - type: array - required: - - stopwords - type: object - SuccessStatus: - properties: - success: - type: boolean - required: - - success - type: object - VoiceQueryModelCollectionConfig: - description: | - Configuration for the voice query model - properties: - model_name: - example: ts/whisper/base.en - type: string - type: object - securitySchemes: - api_key_header: - in: header - name: X-TYPESENSE-API-KEY - type: apiKey -externalDocs: - description: Find out more about Typsesense - url: https://typesense.org -info: - description: An open source search engine for building delightful search experiences. - title: Typesense API - version: "28.0" -openapi: 3.0.3 -paths: - /aliases: - get: - description: List all aliases and the corresponding collections that they map to. - operationId: getAliases responses: - "200": + '200': + description: Search synonym fetched content: application/json: schema: - $ref: '#/components/schemas/CollectionAliasesResponse' - description: List of all collection aliases - summary: List all aliases + $ref: "#/components/schemas/SearchSynonym" + '404': + description: Search synonym was not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + put: tags: - - collections - /aliases/{aliasName}: + - synonyms + summary: Create or update a synonym + description: Create or update a synonym to define search terms that should be considered equivalent. 
+ operationId: upsertSearchSynonym + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: synonymId + in: path + description: The ID of the search synonym to create/update + required: true + schema: + type: string + requestBody: + description: The search synonym object to be created/updated + content: + application/json: + schema: + $ref: "#/components/schemas/SearchSynonymSchema" + required: true + responses: + '200': + description: Created/updated search synonym + content: + application/json: + schema: + $ref: "#/components/schemas/SearchSynonym" + '404': + description: Search synonym was not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" delete: - operationId: deleteAlias + tags: + - synonyms + summary: Delete a synonym associated with a collection + operationId: deleteSearchSynonym parameters: - - description: The name of the alias to delete + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: synonymId in: path - name: aliasName + description: The ID of the search synonym to delete required: true schema: type: string responses: - "200": + '200': + description: The ID of the deleted search synonym content: application/json: schema: - $ref: '#/components/schemas/CollectionAlias' - description: Collection alias was deleted - "404": + $ref: "#/components/schemas/SearchSynonymDeleteResponse" + '404': + description: Search synonym not found content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Alias not found - summary: Delete an alias - tags: - - collections + $ref: "#/components/schemas/ApiResponse" + + /collections/{collectionName}/documents/export: get: - description: Find out which collection an alias points to by fetching it - operationId: getAlias + tags: + - documents + summary: Export all documents in a collection + description: Export all documents in a collection in JSON lines format. + operationId: exportDocuments parameters: - - description: The name of the alias to retrieve + - name: collectionName in: path - name: aliasName + description: The name of the collection required: true schema: type: string + - name: exportDocumentsParameters + in: query + schema: + type: object + properties: + filter_by: + description: + Filter conditions for refining your search results. Separate + multiple conditions with &&. + type: string + include_fields: + description: List of fields from the document to include in the search result + type: string + exclude_fields: + description: List of fields from the document to exclude in the search result + type: string + responses: - "200": + '200': + description: Exports all the documents in a given collection. 
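`upsertSearchSynonym` accepts a `SearchSynonymSchema` body in which only `synonyms` is required, per the schema shown earlier; a sketch with placeholder collection and synonym IDs:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // PUT /collections/{collectionName}/synonyms/{synonymId}
    let synonym = json!({
        // `synonyms` is the only required field; `root`, `locale`
        // and `symbols_to_index` are optional per SearchSynonymSchema.
        "synonyms": ["sneakers", "trainers", "running shoes"]
    });
    let res: serde_json::Value = client
        .put("http://localhost:8108/collections/products/synonyms/shoe-synonyms")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&synonym)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("upserted synonym: {}", res["id"]);
    Ok(())
}
```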
content: - application/json: + application/octet-stream: schema: - $ref: '#/components/schemas/CollectionAlias' - description: Collection alias fetched - "404": + type: string + example: | + {"id": "124", "company_name": "Stark Industries", "num_employees": 5215, "country": "US"} + {"id": "125", "company_name": "Future Technology", "num_employees": 1232,"country": "UK"} + {"id": "126", "company_name": "Random Corp.", "num_employees": 531,"country": "AU"} + '404': + description: The collection was not found content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: The alias was not found - summary: Retrieve an alias + $ref: "#/components/schemas/ApiResponse" + /collections/{collectionName}/documents/import: + post: tags: - - collections - put: - description: Create or update a collection alias. An alias is a virtual collection name that points to a real collection. If you're familiar with symbolic links on Linux, it's very similar to that. Aliases are useful when you want to reindex your data in the background on a new collection and switch your application to it without any changes to your code. - operationId: upsertAlias + - documents + summary: Import documents into a collection + description: + The documents to be imported must be formatted in a newline delimited + JSON structure. You can feed the output file from a Typesense export operation + directly as import. + operationId: importDocuments parameters: - - description: The name of the alias to create/update + - name: collectionName in: path - name: aliasName + description: The name of the collection required: true schema: type: string + # Do not change the index position of this param + - name: importDocumentsParameters + in: query + schema: + type: object + properties: + batch_size: + type: integer + return_id: + type: boolean + description: + Returning the id of the imported documents. If you want the + import response to return the ingested document's id in the + response, you can use the return_id parameter. + remote_embedding_batch_size: + type: integer + return_doc: + type: boolean + action: + $ref: "#/components/schemas/IndexAction" + dirty_values: + $ref: "#/components/schemas/DirtyValues" requestBody: + description: The json array of documents or the JSONL file to import content: - application/json: + application/octet-stream: schema: - $ref: '#/components/schemas/CollectionAliasSchema' - description: Collection alias to be created/updated + type: string + description: The JSONL file to import + required: true responses: - "200": + '200': + description: + Result of the import operation. Each line of the response indicates the result + of each document present in the request body (in the same order). If the import + of a single document fails, it does not affect the other documents. + If there is a failure, the response line will include a corresponding error + message and as well as the actual document content. 
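The import endpoint takes raw newline-delimited JSON rather than a JSON array, and answers with one result object per input line, in the same order. A sketch of pushing a couple of documents with `action=upsert`, again with placeholder host, key, and collection:

```rust
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // Newline-delimited JSON: one document per line, as the import endpoint expects.
    let jsonl = r#"{"id": "124", "company_name": "Stark Industries", "num_employees": 5215, "country": "US"}
{"id": "125", "company_name": "Future Technology", "num_employees": 1232, "country": "UK"}"#;

    // POST /collections/{collectionName}/documents/import with importDocumentsParameters in the query string.
    let response = client
        .post("http://localhost:8108/collections/companies/documents/import")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .query(&[("action", "upsert"), ("batch_size", "40"), ("return_id", "true")])
        .body(jsonl)
        .send()
        .await?
        .error_for_status()?
        .text()
        .await?;

    // The response is also JSONL: one result object per imported document, in order.
    for line in response.lines() {
        let result: serde_json::Value = serde_json::from_str(line)?;
        if result["success"] != true {
            eprintln!("import failed: {line}");
        }
    }
    Ok(())
}
```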
+ content: + application/octet-stream: + schema: + type: string + example: | + {"success": true} + {"success": false, "error": "Bad JSON.", "document": "[bad doc"} + '400': + description: Bad request, see error message for details content: application/json: schema: - $ref: '#/components/schemas/CollectionAlias' - description: The collection alias was created/updated - "400": + $ref: "#/components/schemas/ApiResponse" + '404': + description: The collection was not found content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - "404": + $ref: "#/components/schemas/ApiResponse" + /collections/{collectionName}/documents/{documentId}: + get: + tags: + - documents + summary: Retreive a document + description: Fetch an individual document from a collection by using its ID. + operationId: getDocument + parameters: + - name: collectionName + in: path + description: The name of the collection to search for the document under + required: true + schema: + type: string + - name: documentId + in: path + description: The Document ID + required: true + schema: + type: string + responses: + '200': + description: The document referenced by the ID content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Alias not found - summary: Create or update a collection alias + type: object + description: Can be any key-value pair + '404': + description: The document or collection was not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + patch: tags: - - collections - /analytics/events: - post: - description: Sending events for analytics e.g rank search results based on popularity. - operationId: createAnalyticsEvent + - documents + summary: Update a document + description: + Update an individual document from a collection by using its ID. + The update can be partial. + operationId: updateDocument + parameters: + - name: collectionName + in: path + description: The name of the collection to search for the document under + required: true + schema: + type: string + - name: documentId + in: path + description: The Document ID + required: true + schema: + type: string + - name: dirty_values + in: query + description: Dealing with Dirty Data + schema: + $ref: "#/components/schemas/DirtyValues" requestBody: + description: The document object with fields to be updated content: application/json: schema: - $ref: '#/components/schemas/AnalyticsEventCreateSchema' - description: The Analytics event to be created + type: object + description: Can be any key-value pair + x-go-type: "interface{}" required: true responses: - "201": + '200': + description: The document referenced by the ID was updated content: application/json: schema: - $ref: '#/components/schemas/AnalyticsEventCreateResponse' - description: Analytics event successfully created - "400": + type: object + description: Can be any key-value pair + '404': + description: The document or collection was not found content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - summary: Create an analytics event + $ref: "#/components/schemas/ApiResponse" + delete: tags: - - analytics - /analytics/rules: + - documents + summary: Delete a document + description: Delete an individual document from a collection by using its ID. 
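For the single-document endpoints above, a sketch that fetches a document by ID and then applies a partial update via PATCH; the collection, document ID, and field names are placeholders:

```rust
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    let url = "http://localhost:8108/collections/companies/documents/124";

    // GET /collections/{collectionName}/documents/{documentId}
    let doc: Value = client
        .get(url)
        .header("X-TYPESENSE-API-KEY", "xyz")
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("before: {doc}");

    // PATCH the same document with a partial update; fields not listed stay untouched.
    let updated: Value = client
        .patch(url)
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&json!({ "num_employees": 5500 }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("after: {updated}");
    Ok(())
}
```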
+ operationId: deleteDocument + parameters: + - name: collectionName + in: path + description: The name of the collection to search for the document under + required: true + schema: + type: string + - name: documentId + in: path + description: The Document ID + required: true + schema: + type: string + responses: + '200': + description: The document referenced by the ID was deleted + content: + application/json: + schema: + type: object + description: Can be any key-value pair + '404': + description: The document or collection was not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + /conversations/models: get: - description: Retrieve the details of all analytics rules - operationId: retrieveAnalyticsRules + description: Retrieve all conversation models + operationId: retrieveAllConversationModels responses: - "200": + '200': content: application/json: schema: - $ref: '#/components/schemas/AnalyticsRulesRetrieveSchema' - description: Analytics rules fetched - summary: Retrieves all analytics rules + items: + $ref: '#/components/schemas/ConversationModelSchema' + type: array + x-go-type: '[]*ConversationModelSchema' + description: List of all conversation models + summary: List all conversation models tags: - - analytics + - conversations post: - description: When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. - operationId: createAnalyticsRule + description: Create a Conversation Model + operationId: createConversationModel requestBody: content: application/json: schema: - $ref: '#/components/schemas/AnalyticsRuleSchema' - description: The Analytics rule to be created + $ref: '#/components/schemas/ConversationModelCreateSchema' required: true responses: - "201": + '200': content: application/json: schema: - $ref: '#/components/schemas/AnalyticsRuleSchema' - description: Analytics rule successfully created - "400": + $ref: '#/components/schemas/ConversationModelSchema' + description: Created Conversation Model + '400': content: application/json: schema: $ref: '#/components/schemas/ApiResponse' description: Bad request, see error message for details - summary: Creates an analytics rule tags: - - analytics - /analytics/rules/{ruleName}: - delete: - description: Permanently deletes an analytics rule, given it's name - operationId: deleteAnalyticsRule + - conversations + /conversations/models/{modelId}: + get: + description: Retrieve a conversation model + operationId: retrieveConversationModel parameters: - - description: The name of the analytics rule to delete + - name: modelId in: path - name: ruleName + description: The id of the conversation model to retrieve required: true schema: type: string responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/AnalyticsRuleDeleteResponse' - description: Analytics rule deleted - "404": + '200': content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Analytics rule not found - summary: Delete an analytics rule + $ref: '#/components/schemas/ConversationModelSchema' + description: A conversation model + summary: Retrieve a conversation model tags: - - analytics - get: - description: Retrieve the details of an analytics rule, given it's name - operationId: retrieveAnalyticsRule + - conversations + put: + description: Update a conversation model + operationId: updateConversationModel + requestBody: + content: + application/json: + schema: + $ref: 
'#/components/schemas/ConversationModelUpdateSchema' + required: true parameters: - - description: The name of the analytics rule to retrieve + - name: modelId in: path - name: ruleName + description: The id of the conversation model to update required: true schema: type: string responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/AnalyticsRuleSchema' - description: Analytics rule fetched - "404": + '200': content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Analytics rule not found - summary: Retrieves an analytics rule + $ref: '#/components/schemas/ConversationModelSchema' + description: The conversation model was successfully updated + summary: Update a conversation model tags: - - analytics - put: - description: Upserts an analytics rule with the given name. - operationId: upsertAnalyticsRule + - conversations + delete: + description: Delete a conversation model + operationId: deleteConversationModel parameters: - - description: The name of the analytics rule to upsert + - name: modelId in: path - name: ruleName + description: The id of the conversation model to delete required: true schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/AnalyticsRuleUpsertSchema' - description: The Analytics rule to be upserted - required: true responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/AnalyticsRuleSchema' - description: Analytics rule successfully upserted - "400": + '200': content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - summary: Upserts an analytics rule + $ref: '#/components/schemas/ConversationModelSchema' + description: The conversation model was successfully deleted + summary: Delete a conversation model tags: - - analytics - /collections: + - conversations + /keys: get: - description: Returns a summary of all your collections. The collections are returned sorted by creation date, with the most recent collections appearing first. - operationId: getCollections + tags: + - keys + summary: Retrieve (metadata about) all keys. + operationId: getKeys responses: - "200": + '200': + description: List of all keys content: application/json: schema: - items: - $ref: '#/components/schemas/CollectionResponse' - type: array - x-go-type: '[]*CollectionResponse' - description: List of all collections - summary: List all collections - tags: - - collections + $ref: "#/components/schemas/ApiKeysResponse" post: - description: When a collection is created, we give it a name and describe the fields that will be indexed from the documents added to the collection. - operationId: createCollection + tags: + - keys + summary: Create an API Key + description: + Create an API Key with fine-grain access control. You can restrict access + on both a per-collection and per-action level. + The generated key is returned only during creation. You want to store + this key carefully in a secure place. 
+ operationId: createKey requestBody: + description: The object that describes API key scope content: application/json: schema: - $ref: '#/components/schemas/CollectionSchema' - description: The collection object to be created - required: true + $ref: "#/components/schemas/ApiKeySchema" responses: - "201": + '201': + description: Created API key content: application/json: schema: - $ref: '#/components/schemas/CollectionResponse' - description: Collection successfully created - "400": + $ref: "#/components/schemas/ApiKey" + '400': + description: Bad request, see error message for details content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - "409": + $ref: "#/components/schemas/ApiResponse" + '409': + description: API key generation conflict content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Collection already exists - summary: Create a new collection + $ref: "#/components/schemas/ApiResponse" + /keys/{keyId}: + get: tags: - - collections - /collections/{collectionName}: - delete: - description: Permanently drops a collection. This action cannot be undone. For large collections, this might have an impact on read latencies. - operationId: deleteCollection + - keys + summary: Retrieve (metadata about) a key + description: + Retrieve (metadata about) a key. Only the key prefix is returned + when you retrieve a key. Due to security reasons, only the create endpoint + returns the full API key. + operationId: getKey parameters: - - description: The name of the collection to delete + - name: keyId in: path - name: collectionName + description: The ID of the key to retrieve required: true schema: - type: string + type: integer + format: int64 responses: - "200": + '200': + description: The key referenced by the ID content: application/json: schema: - $ref: '#/components/schemas/CollectionResponse' - description: Collection deleted - "404": + $ref: "#/components/schemas/ApiKey" + '404': + description: The key was not found content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Collection not found - summary: Delete a collection + $ref: "#/components/schemas/ApiResponse" + delete: tags: - - collections - get: - description: Retrieve the details of a collection, given its name. - operationId: getCollection + - keys + summary: Delete an API key given its ID. + operationId: deleteKey parameters: - - description: The name of the collection to retrieve + - name: keyId in: path - name: collectionName + description: The ID of the key to delete required: true schema: - type: string + type: integer + format: int64 responses: - "200": + '200': + description: The key referenced by the ID content: application/json: schema: - $ref: '#/components/schemas/CollectionResponse' - description: Collection fetched - "404": + $ref: "#/components/schemas/ApiKeyDeleteResponse" + '400': + description: Bad request, see error message for details content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Collection not found - summary: Retrieve a single collection + $ref: "#/components/schemas/ApiResponse" + '404': + description: Key not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + /aliases: + get: tags: - collections - patch: - description: Update a collection's schema to modify the fields and their types. 
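`createKey` is the only call that returns the full key value, so the result should be persisted right away. The `description`/`actions`/`collections` fields below reflect the conventional `ApiKeySchema` payload, which is defined elsewhere in the spec, so treat them as assumptions:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // POST /keys - only this call returns the full key; later reads expose just the prefix.
    let key: serde_json::Value = client
        .post("http://localhost:8108/keys")
        .header("X-TYPESENSE-API-KEY", "xyz") // bootstrap/admin key (placeholder)
        .json(&json!({
            "description": "Search-only key for the companies collection",
            "actions": ["documents:search"],
            "collections": ["companies"]
        }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("store this value in a secure place: {}", key["value"]);
    Ok(())
}
```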
- operationId: updateCollection + summary: List all aliases + description: List all aliases and the corresponding collections that they map to. + operationId: getAliases + responses: + '200': + description: List of all collection aliases + content: + application/json: + schema: + $ref: "#/components/schemas/CollectionAliasesResponse" + /aliases/{aliasName}: + put: + tags: + - collections + summary: Create or update a collection alias + description: + Create or update a collection alias. An alias is a virtual collection name that points + to a real collection. If you're familiar with symbolic links on Linux, it's very similar + to that. Aliases are useful when you want to reindex your data in the + background on a new collection and switch your application to it without any changes to + your code. + operationId: upsertAlias parameters: - - description: The name of the collection to update + - name: aliasName in: path - name: collectionName + description: The name of the alias to create/update required: true schema: type: string requestBody: + description: Collection alias to be created/updated content: application/json: schema: - $ref: '#/components/schemas/CollectionUpdateSchema' - description: The document object with fields to be updated - required: true + $ref: "#/components/schemas/CollectionAliasSchema" responses: - "200": + '200': + description: The collection alias was created/updated content: application/json: schema: - $ref: '#/components/schemas/CollectionUpdateSchema' - description: The updated partial collection schema - "400": + $ref: "#/components/schemas/CollectionAlias" + '400': + description: Bad request, see error message for details content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - "404": + $ref: "#/components/schemas/ApiResponse" + '404': + description: Alias not found content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: The collection was not found - summary: Update a collection + $ref: "#/components/schemas/ApiResponse" + get: tags: - collections - /collections/{collectionName}/documents: - delete: - description: Delete a bunch of documents that match a specific filter condition. Use the `batch_size` parameter to control the number of documents that should deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. 
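The reindex-and-switch workflow described for `upsertAlias` boils down to one PUT per switchover. The body is assumed to be a `CollectionAliasSchema` carrying a `collection_name` field, since that schema's body is not part of this hunk:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // PUT /aliases/{aliasName}: point the stable alias at a freshly re-indexed collection.
    let alias: serde_json::Value = client
        .put("http://localhost:8108/aliases/companies")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&json!({ "collection_name": "companies_2025_07_12" }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("alias now targets: {}", alias["collection_name"]);
    Ok(())
}
```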
- operationId: deleteDocuments + summary: Retrieve an alias + description: Find out which collection an alias points to by fetching it + operationId: getAlias parameters: - - description: The name of the collection to delete documents from + - name: aliasName in: path - name: collectionName + description: The name of the alias to retrieve required: true schema: type: string - - in: query - name: batch_size - schema: - type: integer - - in: query - name: filter_by - schema: - type: string - - in: query - name: ignore_not_found - schema: - type: boolean - - in: query - name: truncate - schema: - type: boolean responses: - "200": + '200': + description: Collection alias fetched content: application/json: schema: - properties: - num_deleted: - type: integer - required: - - num_deleted - type: object - description: Documents successfully deleted - "404": + $ref: "#/components/schemas/CollectionAlias" + '404': + description: The alias was not found content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Collection not found - summary: Delete a bunch of documents + $ref: "#/components/schemas/ApiResponse" + delete: tags: - - documents - patch: - description: The filter_by query parameter is used to filter to specify a condition against which the documents are matched. The request body contains the fields that should be updated for any documents that match the filter condition. This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later. - operationId: updateDocuments + - collections + summary: Delete an alias + operationId: deleteAlias parameters: - - description: The name of the collection to update documents in + - name: aliasName in: path - name: collectionName + description: The name of the alias to delete required: true schema: type: string - - in: query - name: filter_by - schema: - type: string - requestBody: - content: - application/json: - schema: - description: Can be any key-value pair - type: object - x-go-type: interface{} - description: The document fields to be updated - required: true responses: - "200": - content: - application/json: - schema: - properties: - num_updated: - description: The number of documents that have been updated - example: 1 - type: integer - required: - - num_updated - type: object - description: The response contains a single field, `num_updated`, indicating the number of documents affected. - "400": + '200': + description: Collection alias was deleted content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - "404": + $ref: "#/components/schemas/CollectionAlias" + '404': + description: Alias not found content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: The collection was not found - summary: Update documents with conditional query + $ref: "#/components/schemas/ApiResponse" + /debug: + get: tags: - - documents - post: - description: A document to be indexed in a given collection must conform to the schema of the collection. 
- operationId: indexDocument - parameters: - - description: The name of the collection to add the document to - in: path - name: collectionName - required: true - schema: - type: string - - description: Additional action to perform - in: query - name: action - schema: - $ref: '#/components/schemas/IndexAction' - example: upsert - type: string - - description: Dealing with Dirty Data - in: query - name: dirty_values - schema: - $ref: '#/components/schemas/DirtyValues' - requestBody: - content: - application/json: - schema: - description: Can be any key-value pair - type: object - x-go-type: interface{} - description: The document object to be indexed - required: true + - debug + summary: Print debugging information + description: Print debugging information + operationId: debug responses: - "201": + '200': + description: Debugging information content: application/json: schema: - description: Can be any key-value pair type: object - description: Document successfully created/indexed - "404": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Collection not found - summary: Index a document + properties: + version: + type: string + /health: + get: tags: - - documents - /collections/{collectionName}/documents/{documentId}: - delete: - description: Delete an individual document from a collection by using its ID. - operationId: deleteDocument - parameters: - - description: The name of the collection to search for the document under - in: path - name: collectionName - required: true - schema: - type: string - - description: The Document ID - in: path - name: documentId - required: true - schema: - type: string + - health + summary: Checks if Typesense server is ready to accept requests. + description: Checks if Typesense server is ready to accept requests. + operationId: health responses: - "200": + '200': + description: Search service is ready for requests. content: application/json: schema: - description: Can be any key-value pair - type: object - description: The document referenced by the ID was deleted - "404": + $ref: "#/components/schemas/HealthStatus" + /operations/schema_changes: + get: + tags: + - operations + summary: Get the status of in-progress schema change operations + description: Returns the status of any ongoing schema change operations. If no schema changes are in progress, returns an empty response. + operationId: getSchemaChanges + responses: + '200': + description: List of schema changes in progress content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: The document or collection was not found - summary: Delete a document + type: array + items: + $ref: "#/components/schemas/SchemaChangeStatus" + /operations/snapshot: + post: tags: - - documents - get: - description: Fetch an individual document from a collection by using its ID. - operationId: getDocument + - operations + summary: Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. + description: + Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. + You can then backup the snapshot directory that gets created and later restore it + as a data directory, as needed. 
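A sketch of probing the `/health` endpoint defined above, which is handy as a readiness check before sending traffic; the `ok` field read from `HealthStatus` is an assumption, since that schema's body is not shown in this hunk:

```rust
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // GET /health returns a HealthStatus object when the node is ready for requests.
    let health: serde_json::Value = reqwest::Client::new()
        .get("http://localhost:8108/health")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    // Field name assumed; adjust to whatever HealthStatus actually defines.
    println!("ready: {}", health["ok"]);
    Ok(())
}
```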
+ operationId: takeSnapshot parameters: - - description: The name of the collection to search for the document under - in: path - name: collectionName - required: true - schema: - type: string - - description: The Document ID - in: path - name: documentId + - name: snapshot_path + in: query + description: The directory on the server where the snapshot should be saved. required: true schema: type: string responses: - "200": + '201': + description: Snapshot is created. content: application/json: schema: - description: Can be any key-value pair - type: object - description: The document referenced by the ID - "404": + $ref: "#/components/schemas/SuccessStatus" + /operations/vote: + post: + tags: + - operations + summary: Triggers a follower node to initiate the raft voting process, which triggers leader re-election. + description: + Triggers a follower node to initiate the raft voting process, which triggers leader re-election. + The follower node that you run this operation against will become the new leader, + once this command succeeds. + operationId: vote + responses: + '200': + description: Re-election is performed. content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: The document or collection was not found - summary: Retreive a document + $ref: "#/components/schemas/SuccessStatus" + /multi_search: + post: + operationId: multiSearch tags: - documents - patch: - description: Update an individual document from a collection by using its ID. The update can be partial. - operationId: updateDocument + summary: send multiple search requests in a single HTTP request + description: + This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. + You can also use this feature to do a federated search across multiple collections in a single HTTP request. parameters: - - description: The name of the collection to search for the document under - in: path - name: collectionName - required: true - schema: - type: string - - description: The Document ID - in: path - name: documentId + - name: multiSearchParameters required: true - schema: - type: string - - description: Dealing with Dirty Data in: query - name: dirty_values schema: - $ref: '#/components/schemas/DirtyValues' + $ref: "#/components/schemas/MultiSearchParameters" requestBody: content: application/json: schema: - description: Can be any key-value pair - type: object - x-go-type: interface{} - description: The document object with fields to be updated - required: true + $ref: "#/components/schemas/MultiSearchSearchesParameter" responses: - "200": + '200': + description: Search results content: application/json: schema: - description: Can be any key-value pair - type: object - description: The document referenced by the ID was updated - "404": + $ref: "#/components/schemas/MultiSearchResult" + '400': + description: Bad request, see error message for details content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: The document or collection was not found - summary: Update a document + $ref: "#/components/schemas/ApiResponse" + /analytics/events: + post: tags: - - documents - /collections/{collectionName}/documents/export: - get: - description: Export all documents in a collection in JSON lines format. 
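`multiSearch` bundles several searches, possibly across collections, into one round trip. The `searches` wrapper below mirrors the usual `MultiSearchSearchesParameter` payload and is an assumption here, as that schema is defined elsewhere; shared parameters can also be passed in the query string, as shown:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // POST /multi_search: several searches in one HTTP request, optionally federated.
    let body = json!({
        "searches": [
            { "collection": "companies", "q": "stark", "query_by": "company_name" },
            { "collection": "products",  "q": "stark", "query_by": "name,description" }
        ]
    });
    let results: serde_json::Value = client
        .post("http://localhost:8108/multi_search")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .query(&[("per_page", "10")]) // common parameters shared by every search in the body
        .json(&body)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!(
        "{} result sets returned",
        results["results"].as_array().map_or(0, |r| r.len())
    );
    Ok(())
}
```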
- operationId: exportDocuments - parameters: - - description: The name of the collection - in: path - name: collectionName - required: true - schema: - type: string - - in: query - name: exclude_fields - schema: - type: string - - in: query - name: filter_by - schema: - type: string - - in: query - name: include_fields - schema: - type: string + - analytics + summary: Create an analytics event + description: Sending events for analytics e.g rank search results based on popularity. + operationId: createAnalyticsEvent + requestBody: + description: The Analytics event to be created + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsEventCreateSchema' + required: true responses: - "200": + '201': + description: Analytics event successfully created content: - application/octet-stream: + application/json: schema: - example: | - {"id": "124", "company_name": "Stark Industries", "num_employees": 5215, "country": "US"} - {"id": "125", "company_name": "Future Technology", "num_employees": 1232,"country": "UK"} - {"id": "126", "company_name": "Random Corp.", "num_employees": 531,"country": "AU"} - type: string - description: Exports all the documents in a given collection. - "404": + $ref: '#/components/schemas/AnalyticsEventCreateResponse' + '400': + description: Bad request, see error message for details content: application/json: schema: $ref: '#/components/schemas/ApiResponse' - description: The collection was not found - summary: Export all documents in a collection - tags: - - documents - /collections/{collectionName}/documents/import: + /analytics/rules: post: - description: The documents to be imported must be formatted in a newline delimited JSON structure. You can feed the output file from a Typesense export operation directly as import. - operationId: importDocuments - parameters: - - description: The name of the collection - in: path - name: collectionName - required: true - schema: - type: string - - in: query - name: action - schema: - $ref: '#/components/schemas/IndexAction' - - in: query - name: batch_size - schema: - type: integer - - in: query - name: dirty_values - schema: - $ref: '#/components/schemas/DirtyValues' - - in: query - name: remote_embedding_batch_size - schema: - type: integer - - in: query - name: return_doc - schema: - type: boolean - - in: query - name: return_id - schema: - type: boolean + tags: + - analytics + summary: Creates an analytics rule + description: + When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. + operationId: createAnalyticsRule requestBody: + description: The Analytics rule to be created content: - application/octet-stream: + application/json: schema: - description: The JSONL file to import - type: string - description: The json array of documents or the JSONL file to import + $ref: "#/components/schemas/AnalyticsRuleSchema" required: true responses: - "200": + '201': + description: Analytics rule successfully created content: - application/octet-stream: + application/json: schema: - example: | - {"success": true} - {"success": false, "error": "Bad JSON.", "document": "[bad doc"} - type: string - description: Result of the import operation. Each line of the response indicates the result of each document present in the request body (in the same order). If the import of a single document fails, it does not affect the other documents. 
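`createAnalyticsRule` posts an `AnalyticsRuleSchema`; the `popular_queries` rule below (source collections, a destination collection, a limit) follows the usual Typesense analytics payload and is an assumption, since the schema body sits elsewhere in the spec:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // POST /analytics/rules (a 201 indicates the rule was created).
    let rule = json!({
        "name": "product_queries_aggregation",
        "type": "popular_queries",
        "params": {
            "source": { "collections": ["products"] },
            "destination": { "collection": "product_queries" },
            "limit": 1000
        }
    });
    let created: serde_json::Value = client
        .post("http://localhost:8108/analytics/rules")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&rule)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("created analytics rule: {}", created["name"]);
    Ok(())
}
```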
If there is a failure, the response line will include a corresponding error message and as well as the actual document content. - "400": + $ref: "#/components/schemas/AnalyticsRuleSchema" + '400': + description: Bad request, see error message for details content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - "404": + $ref: "#/components/schemas/ApiResponse" + get: + tags: + - analytics + summary: Retrieves all analytics rules + description: + Retrieve the details of all analytics rules + operationId: retrieveAnalyticsRules + responses: + '200': + description: Analytics rules fetched content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: The collection was not found - summary: Import documents into a collection + $ref: "#/components/schemas/AnalyticsRulesRetrieveSchema" + /analytics/rules/{ruleName}: + put: tags: - - documents - /collections/{collectionName}/documents/search: - get: - description: Search for documents in a collection that match the search criteria. - operationId: searchCollection + - analytics + summary: Upserts an analytics rule + description: + Upserts an analytics rule with the given name. + operationId: upsertAnalyticsRule parameters: - - description: The name of the collection to search for the document under - in: path - name: collectionName - required: true - schema: - type: string - - in: query - name: cache_ttl - schema: - type: integer - - in: query - name: conversation - schema: - type: boolean - - in: query - name: conversation_id - schema: - type: string - - in: query - name: conversation_model_id - schema: - type: string - - in: query - name: drop_tokens_mode - schema: - $ref: '#/components/schemas/DropTokensMode' - - in: query - name: drop_tokens_threshold - schema: - type: integer - - in: query - name: enable_highlight_v1 - schema: - type: boolean - - in: query - name: enable_overrides - schema: - type: boolean - - in: query - name: enable_synonyms - schema: - type: boolean - - in: query - name: enable_typos_for_alpha_numerical_tokens - schema: - type: boolean - - in: query - name: enable_typos_for_numerical_tokens - schema: - type: boolean - - in: query - name: exclude_fields - schema: - type: string - - in: query - name: exhaustive_search - schema: - type: boolean - - in: query - name: facet_by - schema: - type: string - - in: query - name: facet_query - schema: - type: string - - in: query - name: facet_return_parent - schema: - type: string - - in: query - name: facet_strategy - schema: - type: string - - in: query - name: filter_by - schema: - type: string - - in: query - name: filter_curated_hits - schema: - type: boolean - - in: query - name: group_by - schema: - type: string - - in: query - name: group_limit - schema: - type: integer - - in: query - name: group_missing_values - schema: - type: boolean - - in: query - name: hidden_hits - schema: - type: string - - in: query - name: highlight_affix_num_tokens - schema: - type: integer - - in: query - name: highlight_end_tag - schema: - type: string - - in: query - name: highlight_fields - schema: - type: string - - in: query - name: highlight_full_fields - schema: - type: string - - in: query - name: highlight_start_tag - schema: - type: string - - in: query - name: include_fields - schema: - type: string - - in: query - name: infix - schema: - type: string - - in: query - name: limit - schema: - type: integer - - in: query - name: max_candidates - schema: - type: integer - - in: 
query - name: max_extra_prefix - schema: - type: integer - - in: query - name: max_extra_suffix - schema: - type: integer - - in: query - name: max_facet_values - schema: - type: integer - - in: query - name: max_filter_by_candidates - schema: - type: integer - - in: query - name: min_len_1typo - schema: - type: integer - - in: query - name: min_len_2typo - schema: - type: integer - - in: query - name: num_typos - schema: - type: string - - in: query - name: offset - schema: - type: integer - - in: query - name: override_tags - schema: - type: string - - in: query - name: page - schema: - type: integer - - in: query - name: per_page - schema: - type: integer - - in: query - name: pinned_hits - schema: - type: string - - in: query - name: pre_segmented_query - schema: - type: boolean - - in: query - name: prefix - schema: - type: string - - in: query - name: preset - schema: - type: string - - in: query - name: prioritize_exact_match - schema: - type: boolean - - in: query - name: prioritize_num_matching_fields - schema: - type: boolean - - in: query - name: prioritize_token_position - schema: - type: boolean - - in: query - name: q - schema: - type: string - - in: query - name: query_by - schema: - type: string - - in: query - name: query_by_weights - schema: - type: string - - in: query - name: remote_embedding_num_tries - schema: - type: integer - - in: query - name: remote_embedding_timeout_ms - schema: - type: integer - - in: query - name: search_cutoff_ms - schema: - type: integer - - in: query - name: snippet_threshold - schema: - type: integer - - in: query - name: sort_by - schema: - type: string - - in: query - name: split_join_tokens - schema: - type: string - - in: query - name: stopwords - schema: - type: string - - in: query - name: synonym_num_typos - schema: - type: integer - - in: query - name: synonym_prefix - schema: - type: boolean - - in: query - name: text_match_type - schema: - type: string - - in: query - name: typo_tokens_threshold - schema: - type: integer - - in: query - name: use_cache - schema: - type: boolean - - in: query - name: vector_query - schema: - type: string - - in: query - name: voice_query + - in: path + name: ruleName + description: The name of the analytics rule to upsert schema: type: string + required: true + requestBody: + description: The Analytics rule to be upserted + content: + application/json: + schema: + $ref: "#/components/schemas/AnalyticsRuleUpsertSchema" + required: true responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/SearchResult' - description: Search results - "400": + '200': + description: Analytics rule successfully upserted content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' + $ref: "#/components/schemas/AnalyticsRuleSchema" + '400': description: Bad request, see error message for details - "404": content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: The collection or field was not found - summary: Search for documents in a collection - tags: - - documents - /collections/{collectionName}/overrides: + $ref: "#/components/schemas/ApiResponse" get: - operationId: getSearchOverrides + tags: + - analytics + summary: Retrieves an analytics rule + description: + Retrieve the details of an analytics rule, given it's name + operationId: retrieveAnalyticsRule parameters: - - description: The name of the collection - in: path - name: collectionName - required: true + - in: path + name: ruleName + description: The name of the 
analytics rule to retrieve schema: type: string + required: true responses: - "200": + '200': + description: Analytics rule fetched content: application/json: schema: - $ref: '#/components/schemas/SearchOverridesResponse' - description: List of all search overrides - summary: List all collection overrides - tags: - - documents - - curation - /collections/{collectionName}/overrides/{overrideId}: + $ref: "#/components/schemas/AnalyticsRuleSchema" + '404': + description: Analytics rule not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" delete: - operationId: deleteSearchOverride + tags: + - analytics + summary: Delete an analytics rule + description: + Permanently deletes an analytics rule, given it's name + operationId: deleteAnalyticsRule parameters: - - description: The name of the collection - in: path - name: collectionName - required: true + - in: path + name: ruleName + description: The name of the analytics rule to delete schema: type: string - - description: The ID of the search override to delete - in: path - name: overrideId required: true - schema: - type: string responses: - "200": + '200': + description: Analytics rule deleted content: application/json: schema: - $ref: '#/components/schemas/SearchOverrideDeleteResponse' - description: The ID of the deleted search override - "404": + $ref: "#/components/schemas/AnalyticsRuleDeleteResponse" + '404': + description: Analytics rule not found content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Search override not found - summary: Delete an override associated with a collection + $ref: "#/components/schemas/ApiResponse" + /metrics.json: + get: tags: - - documents - - curation + - operations + summary: Get current RAM, CPU, Disk & Network usage metrics. + description: + Retrieve the metrics. + operationId: retrieveMetrics + responses: + '200': + description: Metrics fetched. + content: + application/json: + schema: + type: object + /stats.json: get: - description: Retrieve the details of a search override, given its id. - operationId: getSearchOverride - parameters: - - description: The name of the collection - in: path - name: collectionName - required: true - schema: - type: string - - description: The id of the search override - in: path - name: overrideId - required: true - schema: - type: string + tags: + - operations + summary: Get stats about API endpoints. + description: + Retrieve the stats about API endpoints. + operationId: retrieveAPIStats responses: - "200": + '200': + description: Stats fetched. content: application/json: schema: - $ref: '#/components/schemas/SearchOverride' - description: Search override fetched - summary: Retrieve a single search override + $ref: "#/components/schemas/APIStatsResponse" + /stopwords: + get: tags: - - documents - - override + - stopwords + summary: Retrieves all stopwords sets. + description: + Retrieve the details of all stopwords sets + operationId: retrieveStopwordsSets + responses: + '200': + description: Stopwords sets fetched. + content: + application/json: + schema: + $ref: "#/components/schemas/StopwordsSetsRetrieveAllSchema" + /stopwords/{setId}: put: - description: Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. - operationId: upsertSearchOverride + tags: + - stopwords + summary: Upserts a stopwords set. 
+ description: + When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. + operationId: upsertStopwordsSet parameters: - - description: The name of the collection - in: path - name: collectionName - required: true + - in: path + name: setId + description: The ID of the stopwords set to upsert. schema: type: string - - description: The ID of the search override to create/update - in: path - name: overrideId required: true - schema: - type: string + example: countries requestBody: + description: The stopwords set to upsert. content: application/json: schema: - $ref: '#/components/schemas/SearchOverrideSchema' - description: The search override object to be created/updated + $ref: "#/components/schemas/StopwordsSetUpsertSchema" required: true responses: - "200": + '200': + description: Stopwords set successfully upserted. content: application/json: schema: - $ref: '#/components/schemas/SearchOverride' - description: Created/updated search override - "404": + $ref: "#/components/schemas/StopwordsSetSchema" + '400': + description: Bad request, see error message for details. content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Search override not found - summary: Create or update an override to promote certain documents over others - tags: - - documents - - curation - /collections/{collectionName}/synonyms: + $ref: "#/components/schemas/ApiResponse" get: - operationId: getSearchSynonyms + tags: + - stopwords + summary: Retrieves a stopwords set. + description: + Retrieve the details of a stopwords set, given it's name. + operationId: retrieveStopwordsSet parameters: - - description: The name of the collection - in: path - name: collectionName - required: true + - in: path + name: setId + description: The ID of the stopwords set to retrieve. schema: type: string + required: true + example: countries responses: - "200": + '200': + description: Stopwords set fetched. content: application/json: schema: - $ref: '#/components/schemas/SearchSynonymsResponse' - description: List of all search synonyms - "404": + $ref: "#/components/schemas/StopwordsSetRetrieveSchema" + '404': + description: Stopwords set not found. content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Search synonyms was not found - summary: List all collection synonyms - tags: - - synonyms - /collections/{collectionName}/synonyms/{synonymId}: + $ref: "#/components/schemas/ApiResponse" delete: - operationId: deleteSearchSynonym + tags: + - stopwords + summary: Delete a stopwords set. + description: + Permanently deletes a stopwords set, given it's name. + operationId: deleteStopwordsSet parameters: - - description: The name of the collection - in: path - name: collectionName - required: true + - in: path + name: setId + description: The ID of the stopwords set to delete. schema: type: string - - description: The ID of the search synonym to delete - in: path - name: synonymId required: true - schema: - type: string + example: countries responses: - "200": + '200': + description: Stopwords set rule deleted. content: application/json: schema: - $ref: '#/components/schemas/SearchSynonymDeleteResponse' - description: The ID of the deleted search synonym - "404": + type: object + properties: + id: + type: string + required: + - id + example: | + {"id": "countries"} + '404': + description: Stopwords set not found. 
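`upsertStopwordsSet` takes a `StopwordsSetUpsertSchema`, i.e. a `stopwords` array plus an optional `locale`, matching the examples given earlier in the spec; `countries` is the set ID used in those examples:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // PUT /stopwords/{setId}
    let set: serde_json::Value = client
        .put("http://localhost:8108/stopwords/countries")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&json!({
            "stopwords": ["Germany", "France", "Italy"],
            "locale": "en"
        }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("upserted stopwords set: {}", set["id"]);
    Ok(())
}
```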
           content:
             application/json:
               schema:
-                $ref: '#/components/schemas/ApiResponse'
-          description: Search synonym not found
-      summary: Delete a synonym associated with a collection
-      tags:
-        - synonyms
+                $ref: "#/components/schemas/ApiResponse"
+  /presets:
     get:
-      description: Retrieve the details of a search synonym, given its id.
-      operationId: getSearchSynonym
-      parameters:
-        - description: The name of the collection
-          in: path
-          name: collectionName
-          required: true
-          schema:
-            type: string
-        - description: The id of the search synonym
-          in: path
-          name: synonymId
-          required: true
-          schema:
-            type: string
+      tags:
+        - presets
+      summary: Retrieves all presets.
+      description: Retrieve the details of all presets
+      operationId: retrieveAllPresets
       responses:
-        "200":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/SearchSynonym'
-          description: Search synonym fetched
-        "404":
+        '200':
+          description: Presets fetched.
           content:
             application/json:
               schema:
-                $ref: '#/components/schemas/ApiResponse'
-          description: Search synonym was not found
-      summary: Retrieve a single search synonym
+                $ref: '#/components/schemas/PresetsRetrieveSchema'
+  /presets/{presetId}:
+    get:
       tags:
-        - synonyms
-    put:
-      description: Create or update a synonym to define search terms that should be considered equivalent.
-      operationId: upsertSearchSynonym
+        - presets
+      summary: Retrieves a preset.
+      description: Retrieve the details of a preset, given its name.
+      operationId: retrievePreset
       parameters:
-        - description: The name of the collection
-          in: path
-          name: collectionName
-          required: true
+        - in: path
+          name: presetId
+          description: The ID of the preset to retrieve.
           schema:
             type: string
-        - description: The ID of the search synonym to create/update
-          in: path
-          name: synonymId
           required: true
-          schema:
-            type: string
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/SearchSynonymSchema'
-        description: The search synonym object to be created/updated
-        required: true
+          example: listing_view
       responses:
-        "200":
+        '200':
+          description: Preset fetched.
           content:
             application/json:
               schema:
-                $ref: '#/components/schemas/SearchSynonym'
-          description: Created/updated search synonym
-        "404":
+                $ref: '#/components/schemas/PresetSchema'
+        '404':
+          description: Preset not found.
           content:
             application/json:
               schema:
                 $ref: '#/components/schemas/ApiResponse'
-          description: Search synonym was not found
-      summary: Create or update a synonym
-      tags:
-        - synonyms
-  /conversations/models:
-    get:
-      description: Retrieve all conversation models
-      operationId: retrieveAllConversationModels
-      responses:
-        "200":
-          content:
-            application/json:
-              schema:
-                items:
-                  $ref: '#/components/schemas/ConversationModelSchema'
-                type: array
-                x-go-type: '[]*ConversationModelSchema'
-          description: List of all conversation models
-      summary: List all conversation models
+    put:
       tags:
-        - conversations
-    post:
-      description: Create a Conversation Model
-      operationId: createConversationModel
+        - presets
+      summary: Upserts a preset.
+      description: Create or update an existing preset.
+      operationId: upsertPreset
+      parameters:
+        - in: path
+          name: presetId
+          description: The name of the preset to upsert.
+          schema:
+            type: string
+          required: true
+          example: listing_view
       requestBody:
+        description: The preset to upsert.
content: application/json: schema: - $ref: '#/components/schemas/ConversationModelCreateSchema' + $ref: '#/components/schemas/PresetUpsertSchema' required: true responses: - "200": + '200': + description: Preset successfully upserted. content: application/json: schema: - $ref: '#/components/schemas/ConversationModelSchema' - description: Created Conversation Model - "400": + $ref: '#/components/schemas/PresetSchema' + '400': + description: Bad request, see error message for details content: application/json: schema: $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - tags: - - conversations - /conversations/models/{modelId}: delete: - description: Delete a conversation model - operationId: deleteConversationModel + tags: + - presets + summary: Delete a preset. + description: Permanently deletes a preset, given it's name. + operationId: deletePreset parameters: - - description: The id of the conversation model to delete - in: path - name: modelId - required: true + - in: path + name: presetId + description: The ID of the preset to delete. schema: type: string + required: true + example: listing_view responses: - "200": + '200': + description: Preset deleted. content: application/json: schema: - $ref: '#/components/schemas/ConversationModelSchema' - description: The conversation model was successfully deleted - summary: Delete a conversation model + $ref: '#/components/schemas/PresetDeleteSchema' + '404': + description: Preset not found. + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /stemming/dictionaries: + get: tags: - - conversations + - stemming + summary: List all stemming dictionaries + description: Retrieve a list of all available stemming dictionaries. + operationId: listStemmingDictionaries + responses: + '200': + description: List of all dictionaries + content: + application/json: + schema: + type: object + properties: + dictionaries: + type: array + items: + type: string + example: ["irregular-plurals", "company-terms"] + + /stemming/dictionaries/{dictionaryId}: get: - description: Retrieve a conversation model - operationId: retrieveConversationModel + tags: + - stemming + summary: Retrieve a stemming dictionary + description: Fetch details of a specific stemming dictionary. + operationId: getStemmingDictionary parameters: - - description: The id of the conversation model to retrieve + - name: dictionaryId in: path - name: modelId + description: The ID of the dictionary to retrieve required: true schema: type: string + example: irregular-plurals responses: - "200": + '200': + description: Stemming dictionary details content: application/json: schema: - $ref: '#/components/schemas/ConversationModelSchema' - description: A conversation model - summary: Retrieve a conversation model + $ref: "#/components/schemas/StemmingDictionary" + '404': + description: Dictionary not found + content: + application/json: + schema: + $ref: "#/components/schemas/ApiResponse" + + /stemming/dictionaries/import: + post: tags: - - conversations - put: - description: Update a conversation model - operationId: updateConversationModel + - stemming + summary: Import a stemming dictionary + description: Upload a JSONL file containing word mappings to create or update a stemming dictionary. 
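As a hedged sketch of the presets endpoints above, the Rust snippet below upserts a preset named `listing_view`; it assumes the `PresetUpsertSchema` body wraps the stored search parameters under a `value` key, and the server URL, API key, and parameter values are placeholders.

```rust
// Sketch only: assumes reqwest (with "json"), tokio, and serde_json.
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // Assumed body shape per PresetUpsertSchema: {"value": { ...search parameters... }}.
    let body = json!({
        "value": { "query_by": "company_name", "sort_by": "num_employees:desc" }
    });
    let preset: Value = client
        .put("http://localhost:8108/presets/listing_view")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&body)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("upserted preset: {preset}");
    Ok(())
}
```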
+ operationId: importStemmingDictionary parameters: - - description: The id of the conversation model to update - in: path - name: modelId + - name: id + in: query + description: The ID to assign to the dictionary required: true schema: type: string + example: irregular-plurals requestBody: + description: The JSONL file containing word mappings + required: true content: application/json: schema: - $ref: '#/components/schemas/ConversationModelUpdateSchema' - required: true + type: string + example: | + {"word": "people", "root": "person"} + {"word": "children", "root": "child"} responses: - "200": + '200': + description: Dictionary successfully imported content: - application/json: + application/octet-stream: schema: - $ref: '#/components/schemas/ConversationModelSchema' - description: The conversation model was successfully updated - summary: Update a conversation model - tags: - - conversations - /debug: - get: - description: Print debugging information - operationId: debug - responses: - "200": + type: string + example: > + {"word": "people", "root": "person"} + {"word": "children", "root": "child"} + '400': + description: Bad request, see error message for details content: application/json: schema: - properties: - version: - type: string - type: object - description: Debugging information - summary: Print debugging information - tags: - - debug - /health: + $ref: "#/components/schemas/ApiResponse" + /nl_search_models: get: - description: Checks if Typesense server is ready to accept requests. - operationId: health - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/HealthStatus' - description: Search service is ready for requests. - summary: Checks if Typesense server is ready to accept requests. tags: - - health - /keys: - get: - operationId: getKeys + - nl_search_models + summary: List all NL search models + description: Retrieve all NL search models. + operationId: retrieveAllNLSearchModels responses: - "200": + '200': + description: List of all NL search models content: application/json: schema: - $ref: '#/components/schemas/ApiKeysResponse' - description: List of all keys - summary: Retrieve (metadata about) all keys. - tags: - - keys + type: array + items: + $ref: '#/components/schemas/NLSearchModelSchema' post: - description: Create an API Key with fine-grain access control. You can restrict access on both a per-collection and per-action level. The generated key is returned only during creation. You want to store this key carefully in a secure place. - operationId: createKey + tags: + - nl_search_models + summary: Create a NL search model + description: Create a new NL search model. 
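The stemming dictionary import above takes a JSONL payload and an `id` query parameter. A minimal Rust sketch of that call follows, reusing the word mappings from the spec's own example; the server URL and API key are placeholders, and the response is simply echoed as text since the spec describes it as the imported lines.

```rust
// Sketch only: assumes reqwest, tokio.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // One JSON object per line, matching the request body example in the spec.
    let jsonl = "{\"word\": \"people\", \"root\": \"person\"}\n{\"word\": \"children\", \"root\": \"child\"}";
    let resp = client
        .post("http://localhost:8108/stemming/dictionaries/import")
        .query(&[("id", "irregular-plurals")])
        .header("X-TYPESENSE-API-KEY", "xyz")
        .body(jsonl)
        .send()
        .await?
        .error_for_status()?;
    println!("{}", resp.text().await?);
    Ok(())
}
```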
+ operationId: createNLSearchModel requestBody: + description: The NL search model to be created content: application/json: schema: - $ref: '#/components/schemas/ApiKeySchema' - description: The object that describes API key scope + $ref: '#/components/schemas/NLSearchModelCreateSchema' + required: true responses: - "201": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiKey' - description: Created API key - "400": + '201': + description: NL search model successfully created content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' + $ref: '#/components/schemas/NLSearchModelSchema' + '400': description: Bad request, see error message for details - "409": content: application/json: schema: $ref: '#/components/schemas/ApiResponse' - description: API key generation conflict - summary: Create an API Key + /nl_search_models/{modelId}: + get: tags: - - keys - /keys/{keyId}: - delete: - operationId: deleteKey + - nl_search_models + summary: Retrieve a NL search model + description: Retrieve a specific NL search model by its ID. + operationId: retrieveNLSearchModel parameters: - - description: The ID of the key to delete + - name: modelId in: path - name: keyId + description: The ID of the NL search model to retrieve required: true schema: - format: int64 - type: integer + type: string responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiKeyDeleteResponse' - description: The key referenced by the ID - "400": + '200': + description: NL search model fetched content: application/json: schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - "404": + $ref: '#/components/schemas/NLSearchModelSchema' + '404': + description: NL search model not found content: application/json: schema: $ref: '#/components/schemas/ApiResponse' - description: Key not found - summary: Delete an API key given its ID. + put: tags: - - keys - get: - description: Retrieve (metadata about) a key. Only the key prefix is returned when you retrieve a key. Due to security reasons, only the create endpoint returns the full API key. - operationId: getKey + - nl_search_models + summary: Update a NL search model + description: Update an existing NL search model. + operationId: updateNLSearchModel parameters: - - description: The ID of the key to retrieve + - name: modelId in: path - name: keyId + description: The ID of the NL search model to update required: true schema: - format: int64 - type: integer + type: string + requestBody: + description: The NL search model fields to update + content: + application/json: + schema: + $ref: '#/components/schemas/NLSearchModelUpdateSchema' + required: true responses: - "200": + '200': + description: NL search model successfully updated content: application/json: schema: - $ref: '#/components/schemas/ApiKey' - description: The key referenced by the ID - "404": + $ref: '#/components/schemas/NLSearchModelSchema' + '400': + description: Bad request, see error message for details content: application/json: schema: $ref: '#/components/schemas/ApiResponse' - description: The key was not found - summary: Retrieve (metadata about) a key - tags: - - keys - /metrics.json: - get: - description: Retrieve the metrics. - operationId: retrieveMetrics - responses: - "200": + '404': + description: NL search model not found content: application/json: schema: - type: object - description: Metrics fetched. - summary: Get current RAM, CPU, Disk & Network usage metrics. 
+ $ref: '#/components/schemas/ApiResponse' + delete: tags: - - operations - /multi_search: - post: - description: This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. - operationId: multiSearch + - nl_search_models + summary: Delete a NL search model + description: Delete a specific NL search model by its ID. + operationId: deleteNLSearchModel parameters: - - in: query - name: cache_ttl - schema: - type: integer - - in: query - name: conversation - schema: - type: boolean - - in: query - name: conversation_id - schema: - type: string - - in: query - name: conversation_model_id - schema: - type: string - - in: query - name: drop_tokens_mode - schema: - $ref: '#/components/schemas/DropTokensMode' - - in: query - name: drop_tokens_threshold - schema: - type: integer - - in: query - name: enable_highlight_v1 - schema: - type: boolean - - in: query - name: enable_overrides - schema: - type: boolean - - in: query - name: enable_synonyms - schema: - type: boolean - - in: query - name: enable_typos_for_alpha_numerical_tokens - schema: - type: boolean - - in: query - name: enable_typos_for_numerical_tokens - schema: - type: boolean - - in: query - name: exclude_fields - schema: - type: string - - in: query - name: exhaustive_search - schema: - type: boolean - - in: query - name: facet_by - schema: - type: string - - in: query - name: facet_query - schema: - type: string - - in: query - name: facet_return_parent - schema: - type: string - - in: query - name: facet_strategy - schema: - type: string - - in: query - name: filter_by - schema: - type: string - - in: query - name: filter_curated_hits - schema: - type: boolean - - in: query - name: group_by - schema: - type: string - - in: query - name: group_limit - schema: - type: integer - - in: query - name: group_missing_values - schema: - type: boolean - - in: query - name: hidden_hits - schema: - type: string - - in: query - name: highlight_affix_num_tokens - schema: - type: integer - - in: query - name: highlight_end_tag - schema: - type: string - - in: query - name: highlight_fields - schema: - type: string - - in: query - name: highlight_full_fields - schema: - type: string - - in: query - name: highlight_start_tag - schema: - type: string - - in: query - name: include_fields - schema: - type: string - - in: query - name: infix + - name: modelId + in: path + description: The ID of the NL search model to delete + required: true schema: type: string - - in: query - name: limit - schema: - type: integer - - in: query - name: max_candidates - schema: - type: integer - - in: query - name: max_extra_prefix - schema: - type: integer - - in: query - name: max_extra_suffix - schema: - type: integer - - in: query - name: max_facet_values - schema: - type: integer - - in: query - name: max_filter_by_candidates - schema: - type: integer - - in: query - name: min_len_1typo - schema: - type: integer - - in: query - name: min_len_2typo - schema: + responses: + '200': + description: NL search model successfully deleted + content: + application/json: + schema: + $ref: '#/components/schemas/NLSearchModelDeleteSchema' + '404': + description: NL search model not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + +components: + schemas: + CollectionSchema: + required: + - name + - fields + type: object + properties: + name: + 
type: string + description: Name of the collection + example: companies + fields: + type: array + description: A list of fields for querying, filtering and faceting + example: + - name: num_employees + type: int32 + facet: false + - name: company_name + type: string + facet: false + - name: country + type: string + facet: true + items: + $ref: "#/components/schemas/Field" + default_sorting_field: + type: string + description: + The name of an int32 / float field that determines the order in which + the search results are ranked when a sort_by clause is not provided during + searching. This field must indicate some kind of popularity. + example: num_employees # Go with the first field name listed above to produce sane defaults + default: "" + token_separators: + type: array + description: > + List of symbols or special characters to be used for + splitting the text into individual words in addition to space and new-line characters. + items: + type: string # characters only + # Could `enum` be used instead, given it's symbols/special *characters*, e.g.: + # enum: ["@", "!", ".", "/", ","] + minLength: 1 + maxLength: 1 + default: [] + enable_nested_fields: + type: boolean + description: + Enables experimental support at a collection level for nested object or object array fields. + This field is only available if the Typesense server is version `0.24.0.rcn34` or later. + default: false + example: true + symbols_to_index: + type: array + description: > + List of symbols or special characters to be indexed. + items: + type: string # characters only + # Could `enum` be used instead, given it's symbols/special *characters*, e.g.: + # enum: ["@", "!", ".", "/", ","] + minLength: 1 + maxLength: 1 + default: [] + voice_query_model: + $ref: "#/components/schemas/VoiceQueryModelCollectionConfig" + CollectionUpdateSchema: + required: + - fields + type: object + properties: + fields: + type: array + description: A list of fields for querying, filtering and faceting + example: + - name: company_name + type: string + facet: false + - name: num_employees + type: int32 + facet: false + - name: country + type: string + facet: true + items: + $ref: "#/components/schemas/Field" + CollectionResponse: + allOf: + - $ref: "#/components/schemas/CollectionSchema" + - type: object + required: + - num_documents + - created_at + properties: + num_documents: + type: integer + description: Number of documents in the collection + format: int64 + readOnly: true + created_at: + type: integer + description: Timestamp of when the collection was created (Unix epoch in seconds) + format: int64 + readOnly: true + Field: + required: + - name + - type + type: object + properties: + name: + type: string + example: company_name + type: + type: string + example: string + optional: + type: boolean + example: true + facet: + type: boolean + example: false + index: + type: boolean + example: true + default: true + locale: + type: string + example: el + sort: + type: boolean + example: true + infix: + type: boolean + example: true + default: false + reference: + type: string + description: > + Name of a field in another collection that should be linked to this collection so that it can be joined during query. + num_dim: + type: integer + example: 256 + drop: + type: boolean + example: true + # omitting default value since we want it to be null + store: + type: boolean + description: > + When set to false, the field value will not be stored on disk. Default: true. 
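The `CollectionSchema` above maps directly onto the body of a create-collection request. As a hedged sketch (the `POST /collections` path is defined in the collections section earlier in this spec, not here), the Rust snippet below sends the same example schema; the server URL and API key are placeholders.

```rust
// Sketch only: assumes reqwest (with "json"), tokio, and serde_json.
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // Mirrors the CollectionSchema example above (companies / num_employees / country).
    let schema = json!({
        "name": "companies",
        "fields": [
            { "name": "company_name",  "type": "string", "facet": false },
            { "name": "num_employees", "type": "int32",  "facet": false },
            { "name": "country",       "type": "string", "facet": true  }
        ],
        "default_sorting_field": "num_employees"
    });
    let created: Value = client
        .post("http://localhost:8108/collections")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&schema)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    // CollectionResponse adds num_documents and created_at on top of the schema.
    println!("created: {created}");
    Ok(())
}
```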
+ vec_dist: + type: string + description: > + The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. + range_index: + type: boolean + description: > + Enables an index optimized for range filtering on numerical fields (e.g. rating:>3.5). Default: false. + stem: + type: boolean + description: > + Values are stemmed before indexing in-memory. Default: false. + stem_dictionary: + type: string + description: Name of the stemming dictionary to use for this field + example: irregular-plurals + token_separators: + type: array + description: > + List of symbols or special characters to be used for + splitting the text into individual words in addition to space and new-line characters. + items: + type: string # characters only + # Could `enum` be used instead, given it's symbols/special *characters*, e.g.: + # enum: ["@", "!", ".", "/", ","] + minLength: 1 + maxLength: 1 + default: [] + symbols_to_index: + type: array + description: > + List of symbols or special characters to be indexed. + items: + type: string # characters only + # Could `enum` be used instead, given it's symbols/special *characters*, e.g.: + # enum: ["@", "!", ".", "/", ","] + minLength: 1 + maxLength: 1 + default: [] + embed: + type: object + required: + - from + - model_config + properties: + from: + type: array + items: + type: string + model_config: + type: object + required: + - model_name + properties: + model_name: + type: string + api_key: + type: string + url: + type: string + access_token: + type: string + refresh_token: + type: string + client_id: + type: string + client_secret: + type: string + project_id: + type: string + indexing_prefix: + type: string + query_prefix: + type: string + VoiceQueryModelCollectionConfig: + type: object + description: > + Configuration for the voice query model + properties: + model_name: + type: string + example: "ts/whisper/base.en" + CollectionAliasSchema: + type: object + required: + - collection_name + properties: + collection_name: + type: string + description: Name of the collection you wish to map the alias to + CollectionAlias: + type: object + required: + - collection_name + - name + properties: + name: + type: string + readOnly: true + description: Name of the collection alias + collection_name: + type: string + description: Name of the collection the alias mapped to + CollectionAliasesResponse: + type: object + required: + - aliases + properties: + aliases: + type: array + x-go-type: "[]*CollectionAlias" + items: + $ref: "#/components/schemas/CollectionAlias" + SearchResult: + type: object + properties: + facet_counts: + type: array + items: + $ref: "#/components/schemas/FacetCounts" + found: + type: integer + description: The number of documents found + found_docs: + type: integer + search_time_ms: + type: integer + description: The number of milliseconds the search took + out_of: + type: integer + description: The total number of documents in the collection + search_cutoff: + type: boolean + description: Whether the search was cut off + page: + type: integer + description: The search result page number + grouped_hits: + type: array + items: + $ref: "#/components/schemas/SearchGroupedHit" + hits: + type: array + description: The documents that matched the search query + items: + $ref: "#/components/schemas/SearchResultHit" + request_params: + type: object + required: + - collection_name + - q + - per_page + properties: + collection_name: + type: string + q: + type: string + per_page: + type: integer + voice_query: + 
type: object + properties: + transcribed_query: + type: string + conversation: + $ref: "#/components/schemas/SearchResultConversation" + SearchResultConversation: + type: object + required: + - answer + - conversation_history + - conversation_id + - query + properties: + answer: + type: string + conversation_history: + type: array + items: + type: object + conversation_id: + type: string + query: + type: string + SearchGroupedHit: + type: object + required: + - group_key + - hits + properties: + found: + type: integer + group_key: + type: array + items: {} + hits: + type: array + description: The documents that matched the search query + items: + $ref: "#/components/schemas/SearchResultHit" + SearchResultHit: + type: object + properties: + highlights: + type: array + description: (Deprecated) Contains highlighted portions of the search fields + items: + $ref: "#/components/schemas/SearchHighlight" + highlight: + type: object + description: Highlighted version of the matching document + additionalProperties: true + document: + type: object + description: Can be any key-value pair + additionalProperties: + type: object + text_match: + type: integer + format: int64 + text_match_info: + type: object + properties: + best_field_score: + type: string + best_field_weight: + type: integer + fields_matched: + type: integer + num_tokens_dropped: + type: integer + format: int64 + x-go-type: uint64 + score: + type: string + tokens_matched: + type: integer + typo_prefix_score: + type: integer + geo_distance_meters: + type: object + description: Can be any key-value pair + additionalProperties: type: integer - - in: query - name: num_typos - schema: + vector_distance: + type: number + format: float + description: Distance between the query vector and matching document's vector value + example: + highlights: + company_name: + field: company_name + snippet: Stark Industries + document: + id: "124" + company_name: Stark Industries + num_employees: 5215 + country: USA + text_match: 1234556 + SearchHighlight: + type: object + properties: + field: + type: string + example: company_name + snippet: + type: string + description: Present only for (non-array) string fields + example: Stark Industries + snippets: + type: array + description: Present only for (array) string[] fields + example: + - Stark Industries + - Stark Corp + items: type: string - - in: query - name: offset - schema: - type: integer - - in: query - name: override_tags - schema: + value: + type: string + description: Full field value with highlighting, present only for (non-array) string fields + example: Stark Industries is a major supplier of space equipment. 
+ values: + type: array + description: Full field value with highlighting, present only for (array) string[] fields + example: + - Stark Industries + - Stark Corp + items: type: string - - in: query - name: page - schema: - type: integer - - in: query - name: per_page - schema: + indices: + type: array + description: The indices property will be present only for string[] + fields and will contain the corresponding indices of the snippets + in the search field + example: 1 + items: type: integer - - in: query - name: pinned_hits - schema: - type: string - - in: query - name: pre_segmented_query - schema: - type: boolean - - in: query - name: prefix - schema: - type: string - - in: query - name: preset - schema: - type: string - - in: query - name: prioritize_exact_match - schema: - type: boolean - - in: query - name: prioritize_num_matching_fields - schema: - type: boolean - - in: query - name: prioritize_token_position - schema: - type: boolean - - in: query - name: q - schema: - type: string - - in: query - name: query_by - schema: - type: string - - in: query - name: query_by_weights - schema: + matched_tokens: + type: array + items: + type: object + x-go-type: "interface{}" + SearchOverrideSchema: + type: object + required: + - rule + properties: + rule: + $ref: "#/components/schemas/SearchOverrideRule" + includes: + type: array + description: + List of document `id`s that should be included in the search results with their + corresponding `position`s. + items: + $ref: "#/components/schemas/SearchOverrideInclude" + excludes: + type: array + description: List of document `id`s that should be excluded from the search results. + items: + $ref: "#/components/schemas/SearchOverrideExclude" + filter_by: + type: string + description: > + A filter by clause that is applied to any search query that matches the override rule. + remove_matched_tokens: + type: boolean + description: > + Indicates whether search query tokens that exist in the override's rule should be removed from the search query. + metadata: + type: object + description: > + Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. + sort_by: + type: string + description: > + A sort by clause that is applied to any search query that matches the override rule. + replace_query: + type: string + description: > + Replaces the current search query with this value, when the search query matches the override rule. + filter_curated_hits: + type: boolean + description: > + When set to true, the filter conditions of the query is applied to the curated records as well. + Default: false. + effective_from_ts: + type: integer + description: > + A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. + effective_to_ts: + type: integer + description: > + A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. + stop_processing: + type: boolean + description: > + When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. + Overrides are processed in the lexical sort order of their id field. + Default: true. 
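The `SearchOverrideSchema` above is the body of the curation upsert endpoint (`PUT /collections/{collectionName}/overrides/{overrideId}`, defined in the curation section of this spec). A hedged Rust sketch of that call, with placeholder collection, override ID, document IDs, server URL, and API key:

```rust
// Sketch only: assumes reqwest (with "json"), tokio, and serde_json.
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    // Body follows SearchOverrideSchema: a SearchOverrideRule plus includes/excludes.
    let body = json!({
        "rule": { "query": "apple", "match": "exact" },
        "includes": [ { "id": "422", "position": 1 } ],
        "excludes": [ { "id": "287" } ],
        "remove_matched_tokens": false
    });
    let override_doc: Value = client
        .put("http://localhost:8108/collections/companies/overrides/customize-apple")
        .header("X-TYPESENSE-API-KEY", "xyz")
        .json(&body)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    // The response should match SearchOverride (the schema plus an id).
    println!("{override_doc}");
    Ok(())
}
```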
+ SearchOverride: + allOf: + - $ref: "#/components/schemas/SearchOverrideSchema" + - type: object + required: + - id + properties: + id: + type: string + readOnly: true + SearchOverrideDeleteResponse: + type: object + required: + - id + properties: + id: + type: string + description: The id of the override that was deleted + SearchOverrideRule: + type: object + properties: + tags: + type: array + description: List of tag values to associate with this override rule. + items: type: string - - in: query - name: remote_embedding_num_tries - schema: - type: integer - - in: query - name: remote_embedding_timeout_ms - schema: - type: integer - - in: query - name: search_cutoff_ms - schema: - type: integer - - in: query - name: snippet_threshold - schema: - type: integer - - in: query - name: sort_by - schema: + query: + type: string + description: Indicates what search queries should be overridden + match: + type: string + description: > + Indicates whether the match on the query term should be `exact` or `contains`. + If we want to match all queries that contained + the word `apple`, we will use the `contains` match instead. + enum: + - exact + - contains + filter_by: + type: string + description: > + Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). + SearchOverrideInclude: + type: object + required: + - id + - position + properties: + id: + type: string + description: document id that should be included + position: + type: integer + description: position number where document should be included in the search results + SearchOverrideExclude: + type: object + required: + - id + properties: + id: + type: string + description: document id that should be excluded from the search results. + SearchOverridesResponse: + type: object + required: + - overrides + properties: + overrides: + type: array + x-go-type: "[]*SearchOverride" + items: + $ref: "#/components/schemas/SearchOverride" + SearchSynonymSchema: + type: object + required: + - synonyms + properties: + root: + type: string + description: For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. + synonyms: + type: array + description: Array of words that should be considered as synonyms. + items: type: string - - in: query - name: split_join_tokens - schema: + locale: + type: string + description: Locale for the synonym, leave blank to use the standard tokenizer. + symbols_to_index: + type: array + description: By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. 
+ items: type: string - - in: query - name: stopwords - schema: + SearchSynonym: + allOf: + - $ref: "#/components/schemas/SearchSynonymSchema" + - type: object + required: + - id + properties: + id: + type: string + readOnly: true + SearchSynonymDeleteResponse: + type: object + required: + - id + properties: + id: + type: string + description: The id of the synonym that was deleted + SearchSynonymsResponse: + type: object + required: + - synonyms + properties: + synonyms: + type: array + x-go-type: "[]*SearchSynonym" + items: + $ref: "#/components/schemas/SearchSynonym" + HealthStatus: + type: object + required: + - ok + properties: + ok: + type: boolean + SchemaChangeStatus: + type: object + properties: + collection: + type: string + description: Name of the collection being modified + validated_docs: + type: integer + description: Number of documents that have been validated + altered_docs: + type: integer + description: Number of documents that have been altered + SuccessStatus: + type: object + required: + - success + properties: + success: + type: boolean + ApiResponse: + type: object + required: + - message + properties: + message: + type: string + ApiKeySchema: + type: object + required: + - actions + - collections + - description + properties: + value: + type: string + description: + type: string + actions: + type: array + items: type: string - - in: query - name: synonym_num_typos - schema: - type: integer - - in: query - name: synonym_prefix - schema: - type: boolean - - in: query - name: text_match_type - schema: + collections: + type: array + items: type: string - - in: query - name: typo_tokens_threshold - schema: + expires_at: + type: integer + format: int64 + ApiKey: + allOf: + - $ref: "#/components/schemas/ApiKeySchema" + - type: object + properties: + id: + type: integer + format: int64 + readOnly: true + value_prefix: + type: string + readOnly: true + ApiKeyDeleteResponse: + type: object + required: + - id + properties: + id: + type: integer + format: int64 + description: The id of the API key that was deleted + ApiKeysResponse: + type: object + required: + - keys + properties: + keys: + type: array + x-go-type: "[]*ApiKey" + items: + $ref: "#/components/schemas/ApiKey" + ScopedKeyParameters: + type: object + properties: + filter_by: + type: string + expires_at: + type: integer + format: int64 + SnapshotParameters: + type: object + properties: + snapshot_path: + type: string + ErrorResponse: + type: object + properties: + message: + type: string + MultiSearchResult: + type: object + required: + - results + properties: + results: + type: array + items: + $ref: "#/components/schemas/MultiSearchResultItem" + conversation: + $ref: "#/components/schemas/SearchResultConversation" + MultiSearchResultItem: + allOf: + - $ref: "#/components/schemas/SearchResult" + - type: object + properties: + code: type: integer - - in: query - name: use_cache - schema: - type: boolean - - in: query - name: vector_query - schema: - type: string - - in: query - name: voice_query - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/MultiSearchSearchesParameter' - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/MultiSearchResult' - description: Search results - "400": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - summary: send multiple search requests in a single HTTP request - tags: - - documents - 
/operations/schema_changes: - get: - description: Returns the status of any ongoing schema change operations. If no schema changes are in progress, returns an empty response. - operationId: getSchemaChanges - responses: - "200": - content: - application/json: - schema: - items: - $ref: '#/components/schemas/SchemaChangeStatus' - type: array - description: List of schema changes in progress - summary: Get the status of in-progress schema change operations - tags: - - operations - /operations/snapshot: - post: - description: Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. You can then backup the snapshot directory that gets created and later restore it as a data directory, as needed. - operationId: takeSnapshot - parameters: - - description: The directory on the server where the snapshot should be saved. - in: query - name: snapshot_path - required: true - schema: - type: string - responses: - "201": - content: - application/json: - schema: - $ref: '#/components/schemas/SuccessStatus' - description: Snapshot is created. - summary: Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. - tags: - - operations - /operations/vote: - post: - description: Triggers a follower node to initiate the raft voting process, which triggers leader re-election. The follower node that you run this operation against will become the new leader, once this command succeeds. - operationId: vote - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/SuccessStatus' - description: Re-election is performed. - summary: Triggers a follower node to initiate the raft voting process, which triggers leader re-election. - tags: - - operations - /presets: - get: - description: Retrieve the details of all presets - operationId: retrieveAllPresets - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/PresetsRetrieveSchema' - description: Presets fetched. - summary: Retrieves all presets. - tags: - - presets - /presets/{presetId}: - delete: - description: Permanently deletes a preset, given it's name. - operationId: deletePreset - parameters: - - description: The ID of the preset to delete. - example: listing_view - in: path - name: presetId - required: true - schema: + description: HTTP error code + format: int64 + error: + type: string + description: Error description + SearchParameters: + type: object + properties: + q: + description: The query text to search for in the collection. + Use * as the search string to return all documents. + This is typically useful when used in conjunction with filter_by. + type: string + + query_by: + description: A list of `string` fields that should be queried + against. Multiple fields are separated with a comma. + type: string + + nl_query: + description: Whether to use natural language processing to parse the query. + type: boolean + + nl_model_id: + description: The ID of the natural language model to use. + type: string + + query_by_weights: + description: + The relative weight to give each `query_by` field when ranking results. + This can be used to boost fields in priority, when looking for matches. + Multiple fields are separated with a comma. + type: string + + text_match_type: + description: + In a multi-field matching context, this parameter determines how the representative text match + score of a record is calculated. Possible values are max_score (default) or max_weight. 
+          type: string
+
+        prefix:
+          description:
+            Boolean field to indicate that the last word in the query should
+            be treated as a prefix, and not as a whole word. This is used for building
+            autocomplete and instant search interfaces. Defaults to true.
+          type: string
+
+        infix:
+          description:
+            If infix index is enabled for this field, infix searching can be done on a per-field
+            basis by sending a comma separated string parameter called infix to the search query.
+            This parameter can have 3 values; `off` infix search is disabled, which is default
+            `always` infix search is performed along with regular search
+            `fallback` infix search is performed if regular search does not produce results
+          type: string
+
+        max_extra_prefix:
+          description:
+            There are also 2 parameters that allow you to control the extent of infix searching
+            max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before
+            or after the query that can be present in the token. For example query "K2100" has 2 extra
+            symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match.
+          type: integer
+
+        max_extra_suffix:
+          description:
+            There are also 2 parameters that allow you to control the extent of infix searching
+            max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before
+            or after the query that can be present in the token. For example query "K2100" has 2 extra
+            symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match.
+          type: integer
+
+        filter_by:
+          description:
+            Filter conditions for refining your search results. Separate
+            multiple conditions with &&.
+          type: string
+          example: "num_employees:>100 && country: [USA, UK]"
+
+        max_filter_by_candidates:
+          description:
+            Controls the number of similar words that Typesense considers during fuzzy search
+            on filter_by values. Useful for controlling prefix matches like company_name:Acm*.
+          type: integer
+
+        sort_by:
+          description:
+            A list of numerical fields and their corresponding sort orders
+            that will be used for ordering your results.
+            Up to 3 sort fields can be specified.
+            The text similarity score is exposed as a special `_text_match` field that
+            you can use in the list of sorting fields.
+            If no `sort_by` parameter is specified, results are sorted by
+            `_text_match:desc,default_sorting_field:desc`
+          type: string
+          example: num_employees:desc
+
+        facet_by:
+          description:
+            A list of fields that will be used for faceting your results
+            on. Separate multiple fields with a comma.
+          type: string
+
+        max_facet_values:
+          description: Maximum number of facet values to be returned.
+          type: integer
+
+        facet_query:
+          description:
+            Facet values that are returned can now be filtered via this parameter.
+            The matching facet text is also highlighted. For example, when faceting
+            by `category`, you can set `facet_query=category:shoe` to return only
+            facet values that contain the prefix "shoe".
+          type: string
+
+        num_typos:
+          description: >
+            The number of typographical errors (1 or 2) that would be tolerated.
+            Default: 2
+          type: string
+
+        page:
+          description: Results from this specific page number would be fetched.
+          type: integer
+
+        per_page:
+          description: "Number of results to fetch per page. Default: 10"
+          type: integer
+
+        limit:
+          description: >
+            Number of hits to fetch. Can be used as an alternative to the per_page parameter.
+            Default: 10.
+ type: integer + + offset: + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + type: integer + + group_by: + description: + You can aggregate search results into groups or buckets by specify + one or more `group_by` fields. Separate multiple fields with a comma. + To group on a particular field, it must be a faceted field. + type: string + + group_limit: + description: > + Maximum number of hits to be returned for every group. If the `group_limit` is + set as `K` then only the top K hits in each group are returned in the response. + Default: 3 + type: integer + + group_missing_values: + description: > + Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. + Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. + Default: true + type: boolean + + include_fields: + description: List of fields from the document to include in the search result + type: string + + exclude_fields: + description: List of fields from the document to exclude in the search result + type: string + + highlight_full_fields: + description: List of fields which should be highlighted fully without snippeting + type: string + + highlight_affix_num_tokens: + description: > + The number of tokens that should surround the highlighted text on each side. + Default: 4 + type: integer + + highlight_start_tag: + description: > + The start tag used for the highlighted snippets. + Default: `` + type: string + highlight_end_tag: + description: > + The end tag used for the highlighted snippets. + Default: `` + type: string + + enable_highlight_v1: + description: > + Flag for enabling/disabling the deprecated, old highlight structure in the response. + Default: true + type: boolean + default: true + + snippet_threshold: + description: > + Field values under this length will be fully highlighted, instead of showing + a snippet of relevant portion. Default: 30 + type: integer + + drop_tokens_threshold: + description: > + If the number of results found for a specific query is less than + this number, Typesense will attempt to drop the tokens in the query until + enough results are found. Tokens that have the least individual hits + are dropped first. Set to 0 to disable. Default: 10 + type: integer + drop_tokens_mode: + $ref: "#/components/schemas/DropTokensMode" + typo_tokens_threshold: + description: > + If the number of results found for a specific query is less than this number, + Typesense will attempt to look for tokens with more typos until + enough results are found. Default: 100 + type: integer + enable_typos_for_alpha_numerical_tokens: + type: boolean + description: > + Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + + filter_curated_hits: + type: boolean + description: > + Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + enable_synonyms: + type: boolean + description: > + If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + synonym_prefix: + type: boolean + description: > + Allow synonym resolution on word prefixes in the query. 
Default: false + synonym_num_typos: + type: integer + description: > + Allow synonym resolution on typo-corrected words in the query. Default: 0 + + pinned_hits: + description: > + A list of records to unconditionally include in the search results + at specific positions. An example use case would be to feature or promote + certain items on the top of search results. + A list of `record_id:hit_position`. Eg: to include a record with ID 123 + at Position 1 and another record with ID 456 at Position 5, + you'd specify `123:1,456:5`. + + You could also use the Overrides feature to override search results based + on rules. Overrides are applied first, followed by `pinned_hits` and + finally `hidden_hits`. + type: string + + hidden_hits: + description: > + A list of records to unconditionally hide from search results. + A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, + you'd specify `123,456`. + + You could also use the Overrides feature to override search results based + on rules. Overrides are applied first, followed by `pinned_hits` and + finally `hidden_hits`. + type: string + + override_tags: + description: Comma separated list of tags to trigger the curations rules that match the tags. + type: string + + highlight_fields: + description: > + A list of custom fields that must be highlighted even if you don't query + for them + type: string + + split_join_tokens: + description: > + Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. + Splitting/joining of tokens will only be attempted if the original query produces no results. + To always trigger this behavior, set value to `always``. + To disable, set value to `off`. Default is `fallback`. + type: string + + pre_segmented_query: + description: > + You can index content from any logographic language into Typesense if you + are able to segment / split the text into space-separated words yourself + before indexing and querying. + + Set this parameter to true to do the same + type: boolean + + preset: + description: > + Search using a bunch of search parameters by setting this parameter to + the name of the existing Preset. + type: string + + enable_overrides: + description: > + If you have some overrides defined but want to disable all of them during + query time, you can do that by setting this parameter to false + type: boolean + default: false + + prioritize_exact_match: + description: > + Set this parameter to true to ensure that an exact match is ranked above + the others + type: boolean + default: true + max_candidates: + description: > + Control the number of words that Typesense considers for typo and prefix searching. + type: integer + prioritize_token_position: + description: > + Make Typesense prioritize documents where the query words appear earlier in the text. + type: boolean + default: false + prioritize_num_matching_fields: + description: > + Make Typesense prioritize documents where the query words appear in more number of fields. + type: boolean + default: true + enable_typos_for_numerical_tokens: + description: > + Make Typesense disable typos for numerical tokens. + type: boolean + default: true + exhaustive_search: + description: > + Setting this to true will make Typesense consider all prefixes and typo + corrections of the words in the query without stopping early when enough results are found + (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). 
+ type: boolean + search_cutoff_ms: + description: > + Typesense will attempt to return results early if the cutoff time has elapsed. + This is not a strict guarantee and facet computation is not bound by this parameter. + type: integer + use_cache: + description: > + Enable server side caching of search query results. By default, caching is disabled. + type: boolean + cache_ttl: + description: > + The duration (in seconds) that determines how long the search query is cached. + This value can be set on a per-query basis. Default: 60. + type: integer + min_len_1typo: + description: > + Minimum word length for 1-typo correction to be applied. + The value of num_typos is still treated as the maximum allowed typos. + type: integer + min_len_2typo: + description: > + Minimum word length for 2-typo correction to be applied. + The value of num_typos is still treated as the maximum allowed typos. + type: integer + vector_query: + description: > + Vector query expression for fetching documents "closest" to a given query/document vector. + type: string + remote_embedding_timeout_ms: + description: > + Timeout (in milliseconds) for fetching remote embeddings. + type: integer + remote_embedding_num_tries: + description: > + Number of times to retry fetching remote embeddings. + type: integer + facet_strategy: + description: > + Choose the underlying faceting strategy used. Comma separated string of allows values: + exhaustive, top_values or automatic (default). + type: string + stopwords: + description: > + Name of the stopwords set to apply for this search, + the keywords present in the set will be removed from the search query. + type: string + facet_return_parent: + description: > + Comma separated string of nested facet fields whose parent object should be returned in facet response. + type: string + voice_query: + description: > + The base64 encoded audio file in 16 khz 16-bit WAV format. + type: string + conversation: + description: > + Enable conversational search. + type: boolean + conversation_model_id: + description: > + The Id of Conversation Model to be used. + type: string + conversation_id: + description: > + The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + type: string + + MultiSearchParameters: + description: > + Parameters for the multi search API. + type: object + properties: + q: + description: The query text to search for in the collection. + Use * as the search string to return all documents. + This is typically useful when used in conjunction with filter_by. + type: string + + query_by: + description: A list of `string` fields that should be queried + against. Multiple fields are separated with a comma. + type: string + + query_by_weights: + description: + The relative weight to give each `query_by` field when ranking results. + This can be used to boost fields in priority, when looking for matches. + Multiple fields are separated with a comma. + type: string + + text_match_type: + description: + In a multi-field matching context, this parameter determines how the representative text match + score of a record is calculated. Possible values are max_score (default) or max_weight. + type: string + + prefix: + description: + Boolean field to indicate that the last word in the query should + be treated as a prefix, and not as a whole word. This is used for building + autocomplete and instant search interfaces. Defaults to true. 
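The `SearchParameters` above map one-to-one onto query parameters of the single-collection search endpoint (`GET /collections/{collectionName}/documents/search`, defined in the documents section of this spec). A hedged Rust sketch with a few of those parameters; the collection, query values, server URL, and API key are placeholders.

```rust
// Sketch only: assumes reqwest (with "json"), tokio, and serde_json.
use serde_json::Value;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    let result: Value = client
        .get("http://localhost:8108/collections/companies/documents/search")
        .query(&[
            ("q", "stark"),
            ("query_by", "company_name"),
            ("filter_by", "num_employees:>100"),
            ("sort_by", "num_employees:desc"),
            ("facet_by", "country"),
            ("per_page", "10"),
        ])
        .header("X-TYPESENSE-API-KEY", "xyz")
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    // The response should match the SearchResult schema above.
    println!("found: {}", result["found"]);
    Ok(())
}
```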
+          type: string
+
+        infix:
+          description:
+            If infix index is enabled for this field, infix searching can be done on a per-field
+            basis by sending a comma separated string parameter called infix to the search query.
+            This parameter can have 3 values; `off` infix search is disabled, which is default
+            `always` infix search is performed along with regular search
+            `fallback` infix search is performed if regular search does not produce results
+          type: string
+
+        max_extra_prefix:
+          description:
+            There are also 2 parameters that allow you to control the extent of infix searching
+            max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before
+            or after the query that can be present in the token. For example query "K2100" has 2 extra
+            symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match.
+          type: integer
+
+        max_extra_suffix:
+          description:
+            There are also 2 parameters that allow you to control the extent of infix searching
+            max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before
+            or after the query that can be present in the token. For example query "K2100" has 2 extra
+            symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match.
+          type: integer
+
+        filter_by:
+          description:
+            Filter conditions for refining your search results. Separate
+            multiple conditions with &&.
+          type: string
+          example: "num_employees:>100 && country: [USA, UK]"
+
+        sort_by:
+          description:
+            A list of numerical fields and their corresponding sort orders
+            that will be used for ordering your results.
+            Up to 3 sort fields can be specified.
+            The text similarity score is exposed as a special `_text_match` field that
+            you can use in the list of sorting fields.
+            If no `sort_by` parameter is specified, results are sorted by
+            `_text_match:desc,default_sorting_field:desc`
+          type: string
+
+        facet_by:
+          description:
+            A list of fields that will be used for faceting your results
+            on. Separate multiple fields with a comma.
+          type: string
+
+        max_facet_values:
+          description: Maximum number of facet values to be returned.
+          type: integer
+
+        facet_query:
+          description:
+            Facet values that are returned can now be filtered via this parameter.
+            The matching facet text is also highlighted. For example, when faceting
+            by `category`, you can set `facet_query=category:shoe` to return only
+            facet values that contain the prefix "shoe".
+          type: string
+
+        num_typos:
+          description: >
+            The number of typographical errors (1 or 2) that would be tolerated.
+            Default: 2
+          type: string
+
+        page:
+          description: Results from this specific page number would be fetched.
+          type: integer
+
+        per_page:
+          description: "Number of results to fetch per page. Default: 10"
+          type: integer
+
+        limit:
+          description: >
+            Number of hits to fetch. Can be used as an alternative to the per_page parameter.
+            Default: 10.
+          type: integer
+
+        offset:
+          description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter.
+          type: integer
+
+        group_by:
+          description:
+            You can aggregate search results into groups or buckets by specifying
+            one or more `group_by` fields. Separate multiple fields with a comma.
+            To group on a particular field, it must be a faceted field.
+          type: string
+
+        group_limit:
+          description: >
+            Maximum number of hits to be returned for every group.
If the `group_limit` is + set as `K` then only the top K hits in each group are returned in the response. + Default: 3 + type: integer + + group_missing_values: + description: > + Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. + Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. + Default: true + type: boolean + + include_fields: + description: List of fields from the document to include in the search result + type: string + + exclude_fields: + description: List of fields from the document to exclude in the search result + type: string + + highlight_full_fields: + description: List of fields which should be highlighted fully without snippeting + type: string + + highlight_affix_num_tokens: + description: > + The number of tokens that should surround the highlighted text on each side. + Default: 4 + type: integer + + highlight_start_tag: + description: > + The start tag used for the highlighted snippets. + Default: `` + type: string + highlight_end_tag: + description: > + The end tag used for the highlighted snippets. + Default: `` + type: string + + snippet_threshold: + description: > + Field values under this length will be fully highlighted, instead of showing + a snippet of relevant portion. Default: 30 + type: integer + + drop_tokens_threshold: + description: > + If the number of results found for a specific query is less than + this number, Typesense will attempt to drop the tokens in the query until + enough results are found. Tokens that have the least individual hits + are dropped first. Set to 0 to disable. Default: 10 + type: integer + drop_tokens_mode: + $ref: "#/components/schemas/DropTokensMode" + typo_tokens_threshold: + description: > + If the number of results found for a specific query is less than this number, + Typesense will attempt to look for tokens with more typos until + enough results are found. Default: 100 + type: integer + enable_typos_for_alpha_numerical_tokens: + type: boolean + description: > + Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + + filter_curated_hits: + type: boolean + description: > + Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + enable_synonyms: + type: boolean + description: > + If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + synonym_prefix: + type: boolean + description: > + Allow synonym resolution on word prefixes in the query. Default: false + synonym_num_typos: + type: integer + description: > + Allow synonym resolution on typo-corrected words in the query. Default: 0 + + pinned_hits: + description: > + A list of records to unconditionally include in the search results + at specific positions. An example use case would be to feature or promote + certain items on the top of search results. + A list of `record_id:hit_position`. Eg: to include a record with ID 123 + at Position 1 and another record with ID 456 at Position 5, + you'd specify `123:1,456:5`. + + You could also use the Overrides feature to override search results based + on rules. Overrides are applied first, followed by `pinned_hits` and + finally `hidden_hits`. 
+ type: string + + hidden_hits: + description: > + A list of records to unconditionally hide from search results. + A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, + you'd specify `123,456`. + + You could also use the Overrides feature to override search results based + on rules. Overrides are applied first, followed by `pinned_hits` and + finally `hidden_hits`. + type: string + + override_tags: + description: Comma separated list of tags to trigger the curations rules that match the tags. + type: string + + highlight_fields: + description: > + A list of custom fields that must be highlighted even if you don't query + for them + type: string + + pre_segmented_query: + description: > + You can index content from any logographic language into Typesense if you + are able to segment / split the text into space-separated words yourself + before indexing and querying. + + Set this parameter to true to do the same + type: boolean + default: false + + preset: + description: > + Search using a bunch of search parameters by setting this parameter to + the name of the existing Preset. + type: string + + enable_overrides: + description: > + If you have some overrides defined but want to disable all of them during + query time, you can do that by setting this parameter to false + type: boolean + default: false + + prioritize_exact_match: + description: > + Set this parameter to true to ensure that an exact match is ranked above + the others + type: boolean + default: true + + prioritize_token_position: + description: > + Make Typesense prioritize documents where the query words appear earlier in the text. + type: boolean + default: false + + prioritize_num_matching_fields: + description: > + Make Typesense prioritize documents where the query words appear in more number of fields. + type: boolean + default: true + + enable_typos_for_numerical_tokens: + description: > + Make Typesense disable typos for numerical tokens. + type: boolean + default: true + + exhaustive_search: + description: > + Setting this to true will make Typesense consider all prefixes and typo + corrections of the words in the query without stopping early when enough results are found + (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + type: boolean + search_cutoff_ms: + description: > + Typesense will attempt to return results early if the cutoff time has elapsed. + This is not a strict guarantee and facet computation is not bound by this parameter. + type: integer + use_cache: + description: > + Enable server side caching of search query results. By default, caching is disabled. + type: boolean + cache_ttl: + description: > + The duration (in seconds) that determines how long the search query is cached. + This value can be set on a per-query basis. Default: 60. + type: integer + min_len_1typo: + description: > + Minimum word length for 1-typo correction to be applied. + The value of num_typos is still treated as the maximum allowed typos. + type: integer + min_len_2typo: + description: > + Minimum word length for 2-typo correction to be applied. + The value of num_typos is still treated as the maximum allowed typos. + type: integer + vector_query: + description: > + Vector query expression for fetching documents "closest" to a given query/document vector. + type: string + remote_embedding_timeout_ms: + description: > + Timeout (in milliseconds) for fetching remote embeddings. 
+ type: integer
+ remote_embedding_num_tries:
+ description: >
+ Number of times to retry fetching remote embeddings.
+ type: integer
+ facet_strategy:
+ description: >
+ Choose the underlying faceting strategy used. Comma separated string of allowed values:
+ exhaustive, top_values or automatic (default).
+ type: string
+ stopwords:
+ description: >
+ Name of the stopwords set to apply for this search,
+ the keywords present in the set will be removed from the search query.
+ type: string
+ facet_return_parent:
+ description: >
+ Comma separated string of nested facet fields whose parent object should be returned in facet response.
+ type: string
+ voice_query:
+ description: >
+ The base64 encoded audio file in 16 kHz 16-bit WAV format.
+ type: string
+ conversation:
+ description: >
+ Enable conversational search.
+ type: boolean
+ conversation_model_id:
+ description: >
+ The ID of the conversation model to be used.
+ type: string
+ conversation_id:
+ description: >
+ The ID of a previous conversation to continue; this tells Typesense to include prior context when communicating with the LLM.
+ type: string
+ MultiSearchSearchesParameter:
+ type: object
+ required:
+ - searches
+ properties:
+ union:
+ type: boolean
+ description: When true, merges the search results from each search query into a single ordered set of hits.
+ searches:
+ type: array
+ items:
+ $ref: "#/components/schemas/MultiSearchCollectionParameters"
+ MultiSearchCollectionParameters:
+ allOf:
+ - $ref: "#/components/schemas/MultiSearchParameters"
+ - type: object
+ properties:
+ collection:
+ type: string
+ description: >
+ The collection to search in.
+ x-typesense-api-key:
+ type: string
+ description: A separate search API key for each search within a multi_search request
+ rerank_hybrid_matches:
+ type: boolean
+ description: >
+ When true, computes both text match and vector distance scores for all matches in hybrid search.
+ Documents found only through keyword search will get a vector distance score, and
+ documents found only through vector search will get a text match score.
+ default: false + FacetCounts: + type: object + properties: + counts: + type: array + items: + type: object + properties: + count: + type: integer + highlighted: + type: string + value: + type: string + parent: + type: object + field_name: + type: string + stats: + type: object + properties: + max: + type: number + format: double + min: + type: number + format: double + sum: + type: number + format: double + total_values: + type: integer + avg: + type: number + format: double + AnalyticsEventCreateResponse: + type: object + required: + - ok + properties: + ok: + type: boolean + AnalyticsEventCreateSchema: + type: object + required: + - type + - name + - data + properties: + type: + type: string + name: + type: string + data: + type: object + AnalyticsRuleUpsertSchema: + type: object + required: + - type + - params + properties: + type: + type: string + enum: + - popular_queries + - nohits_queries + - counter + params: + $ref: "#/components/schemas/AnalyticsRuleParameters" + AnalyticsRuleParameters: + type: object + required: + - source + - destination + properties: + source: + $ref: '#/components/schemas/AnalyticsRuleParametersSource' + destination: + $ref: '#/components/schemas/AnalyticsRuleParametersDestination' + limit: + type: integer + expand_query: + type: boolean + AnalyticsRuleParametersSource: + type: object + required: + - collections + properties: + collections: + type: array + items: type: string - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/PresetDeleteSchema' - description: Preset deleted. - "404": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Preset not found. - summary: Delete a preset. - tags: - - presets - get: - description: Retrieve the details of a preset, given it's name. - operationId: retrievePreset - parameters: - - description: The ID of the preset to retrieve. 
- example: listing_view - in: path - name: presetId - required: true - schema: + events: + type: array + items: + type: object + required: + - type + - weight + - name + properties: + type: + type: string + weight: + type: number + format: float + name: + type: string + AnalyticsRuleParametersDestination: + type: object + required: + - collection + properties: + collection: + type: string + counter_field: + type: string + AnalyticsRuleDeleteResponse: + type: object + required: + - name + properties: + name: + type: string + AnalyticsRuleSchema: + allOf: + - $ref: '#/components/schemas/AnalyticsRuleUpsertSchema' + - type: object + required: + - name + properties: + name: + type: string + AnalyticsRulesRetrieveSchema: + type: object + properties: + rules: + type: array + items: + $ref: "#/components/schemas/AnalyticsRuleSchema" + x-go-type: '[]*AnalyticsRuleSchema' + APIStatsResponse: + type: object + properties: + delete_latency_ms: + type: number + format: double + delete_requests_per_second: + type: number + format: double + import_latency_ms: + type: number + format: double + import_requests_per_second: + type: number + format: double + latency_ms: + type: object + x-go-type: "map[string]float64" + overloaded_requests_per_second: + type: number + format: double + pending_write_batches: + type: number + format: double + requests_per_second: + type: object + x-go-type: "map[string]float64" + search_latency_ms: + type: number + format: double + search_requests_per_second: + type: number + format: double + total_requests_per_second: + type: number + format: double + write_latency_ms: + type: number + format: double + write_requests_per_second: + type: number + format: double + StopwordsSetUpsertSchema: + type: object + properties: + stopwords: + type: array + items: type: string - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/PresetSchema' - description: Preset fetched. - "404": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Preset not found. - summary: Retrieves a preset. - tags: - - presets - put: - description: Create or update an existing preset. - operationId: upsertPreset - parameters: - - description: The name of the preset set to upsert. - example: listing_view - in: path - name: presetId - required: true - schema: + locale: + type: string + required: + - stopwords + example: | + {"stopwords": ["Germany", "France", "Italy"], "locale": "en"} + StopwordsSetSchema: + type: object + properties: + id: + type: string + stopwords: + type: array + items: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/PresetUpsertSchema' - description: The stopwords set to upsert. - required: true - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/PresetSchema' - description: Preset successfully upserted. - "400": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details. - summary: Upserts a preset. 
- tags: + locale: + type: string + required: + - id + - stopwords + example: | + {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"} + StopwordsSetRetrieveSchema: + type: object + properties: + stopwords: + $ref: "#/components/schemas/StopwordsSetSchema" + required: + - stopwords + example: | + {"stopwords": {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}} + StopwordsSetsRetrieveAllSchema: + type: object + properties: + stopwords: + type: array + items: + $ref: "#/components/schemas/StopwordsSetSchema" + required: + - stopwords + example: | + {"stopwords": [{"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}]} + PresetUpsertSchema: + properties: + value: + oneOf: + - $ref: '#/components/schemas/SearchParameters' + - $ref: '#/components/schemas/MultiSearchSearchesParameter' + required: + - value + PresetSchema: + allOf: + - $ref: '#/components/schemas/PresetUpsertSchema' + - type: object + required: + - name + properties: + name: + type: string + PresetsRetrieveSchema: + type: object + required: - presets - /stats.json: - get: - description: Retrieve the stats about API endpoints. - operationId: retrieveAPIStats - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/APIStatsResponse' - description: Stats fetched. - summary: Get stats about API endpoints. - tags: - - operations - /stemming/dictionaries: - get: - description: Retrieve a list of all available stemming dictionaries. - operationId: listStemmingDictionaries - responses: - "200": - content: - application/json: - schema: - properties: - dictionaries: - example: - - irregular-plurals - - company-terms - items: - type: string - type: array - type: object - description: List of all dictionaries - summary: List all stemming dictionaries - tags: - - stemming - /stemming/dictionaries/{dictionaryId}: - get: - description: Fetch details of a specific stemming dictionary. - operationId: getStemmingDictionary - parameters: - - description: The ID of the dictionary to retrieve - example: irregular-plurals - in: path - name: dictionaryId - required: true - schema: - type: string - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/StemmingDictionary' - description: Stemming dictionary details - "404": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Dictionary not found - summary: Retrieve a stemming dictionary - tags: - - stemming - /stemming/dictionaries/import: - post: - description: Upload a JSONL file containing word mappings to create or update a stemming dictionary. - operationId: importStemmingDictionary - parameters: - - description: The ID to assign to the dictionary - example: irregular-plurals - in: query - name: id - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - example: | - {"word": "people", "root": "person"} - {"word": "children", "root": "child"} + properties: + presets: + type: array + items: + $ref: '#/components/schemas/PresetSchema' + x-go-type: '[]*PresetSchema' + PresetDeleteSchema: + type: object + required: + - name + properties: + name: + type: string + # client libraries already have .create, .upsert,... 
methods so we omit the `action` param + DocumentIndexParameters: + type: object + properties: + dirty_values: + $ref: "#/components/schemas/DirtyValues" + DirtyValues: + type: string + enum: [coerce_or_reject, coerce_or_drop, drop, reject] + IndexAction: + type: string + enum: [create, update, upsert, emplace] + DropTokensMode: + type: string + enum: [right_to_left, left_to_right, both_sides:3] + description: > + Dictates the direction in which the words in the query must be dropped when the original words in the query do not appear in any document. + Values: right_to_left (default), left_to_right, both_sides:3 + A note on both_sides:3 - for queries upto 3 tokens (words) in length, this mode will drop tokens from both sides and exhaustively rank all matching results. + If query length is greater than 3 words, Typesense will just fallback to default behavior of right_to_left + ConversationModelCreateSchema: + required: + - model_name + - max_bytes + allOf: + - $ref: '#/components/schemas/ConversationModelUpdateSchema' + - type: object + required: + - model_name + - max_bytes + - history_collection + properties: + model_name: + description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM + type: string + max_bytes: + description: | + The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. + type: integer + history_collection: + type: string + description: Typesense collection that stores the historical conversations + ConversationModelUpdateSchema: + type: object + properties: + id: + type: string + description: An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. + model_name: + description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM + type: string + api_key: + description: The LLM service's API Key + type: string + history_collection: + type: string + description: Typesense collection that stores the historical conversations + account_id: + description: LLM service's account ID (only applicable for Cloudflare) + type: string + system_prompt: + description: The system prompt that contains special instructions to the LLM + type: string + ttl: + type: integer + description: | + Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) + max_bytes: + description: | + The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. + type: integer + vllm_url: + description: URL of vLLM service + type: string + ConversationModelSchema: + allOf: + - $ref: '#/components/schemas/ConversationModelCreateSchema' + - type: object + required: + - id + properties: + id: type: string - description: The JSONL file containing word mappings - required: true - responses: - "200": - content: - application/octet-stream: - schema: - example: | - {"word": "people", "root": "person"} {"word": "children", "root": "child"} + description: An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. 
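+ # NOTE (illustrative sketch, not part of the generated spec): a request against the
+ # `POST /conversations/models` endpoint defined in this spec, built from the
+ # ConversationModelCreateSchema fields above. Host/port, the model name and the
+ # `conversation_store` history collection are placeholder assumptions.
+ #
+ #   curl -X POST 'http://localhost:8108/conversations/models' \
+ #     -H 'X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}' \
+ #     -H 'Content-Type: application/json' \
+ #     -d '{
+ #           "model_name": "openai/gpt-3.5-turbo",
+ #           "api_key": "AN_OPENAI_API_KEY",
+ #           "max_bytes": 16384,
+ #           "history_collection": "conversation_store",
+ #           "system_prompt": "You are an assistant answering questions about our docs."
+ #         }'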
+ StemmingDictionary: + type: object + required: + - id + - words + properties: + id: + type: string + description: Unique identifier for the dictionary + example: irregular-plurals + words: + type: array + description: List of word mappings in the dictionary + items: + type: object + required: + - word + - root + properties: + word: type: string - description: Dictionary successfully imported - "400": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details - summary: Import a stemming dictionary - tags: - - stemming - /stopwords: - get: - description: Retrieve the details of all stopwords sets - operationId: retrieveStopwordsSets - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/StopwordsSetsRetrieveAllSchema' - description: Stopwords sets fetched. - summary: Retrieves all stopwords sets. - tags: - - stopwords - /stopwords/{setId}: - delete: - description: Permanently deletes a stopwords set, given it's name. - operationId: deleteStopwordsSet - parameters: - - description: The ID of the stopwords set to delete. - example: countries - in: path - name: setId - required: true - schema: - type: string - responses: - "200": - content: - application/json: - schema: - example: | - {"id": "countries"} - properties: - id: - type: string - required: - - id - type: object - description: Stopwords set rule deleted. - "404": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Stopwords set not found. - summary: Delete a stopwords set. - tags: - - stopwords - get: - description: Retrieve the details of a stopwords set, given it's name. - operationId: retrieveStopwordsSet - parameters: - - description: The ID of the stopwords set to retrieve. - example: countries - in: path - name: setId - required: true - schema: - type: string - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/StopwordsSetRetrieveSchema' - description: Stopwords set fetched. - "404": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Stopwords set not found. - summary: Retrieves a stopwords set. - tags: - - stopwords - put: - description: When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. - operationId: upsertStopwordsSet - parameters: - - description: The ID of the stopwords set to upsert. 
- example: countries - in: path - name: setId - required: true - schema: + description: The word form to be stemmed + example: people + root: + type: string + description: The root form of the word + example: person + NLSearchModelBase: + type: object + properties: + model_name: + type: string + description: Name of the NL model to use + api_key: + type: string + description: API key for the NL model service + api_url: + type: string + description: Custom API URL for the NL model service + max_bytes: + type: integer + description: Maximum number of bytes to process + temperature: + type: number + description: Temperature parameter for the NL model + system_prompt: + type: string + description: System prompt for the NL model + top_p: + type: number + description: Top-p parameter for the NL model (Google-specific) + top_k: + type: integer + description: Top-k parameter for the NL model (Google-specific) + stop_sequences: + type: array + items: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/StopwordsSetUpsertSchema' - description: The stopwords set to upsert. - required: true - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/StopwordsSetSchema' - description: Stopwords set successfully upserted. - "400": - content: - application/json: - schema: - $ref: '#/components/schemas/ApiResponse' - description: Bad request, see error message for details. - summary: Upserts a stopwords set. - tags: - - stopwords -security: - - api_key_header: [] -tags: - - description: A collection is defined by a schema - externalDocs: - description: Find out more - url: https://typesense.org/api/#create-collection - name: collections - - description: A document is an individual record to be indexed and belongs to a collection - externalDocs: - description: Find out more - url: https://typesense.org/api/#index-document - name: documents - - description: Hand-curate search results based on conditional business rules - externalDocs: - description: Find out more - url: https://typesense.org/docs/0.23.0/api/#curation - name: curation - - description: Typesense can aggregate search queries for both analytics purposes and for query suggestions. 
- externalDocs: - description: Find out more - url: https://typesense.org/docs/28.0/api/analytics-query-suggestions.html - name: analytics - - description: Manage API Keys with fine-grain access control - externalDocs: - description: Find out more - url: https://typesense.org/docs/0.23.0/api/#api-keys - name: keys - - description: Debugging information - name: debug - - description: Manage Typesense cluster - externalDocs: - description: Find out more - url: https://typesense.org/docs/28.0/api/cluster-operations.html - name: operations - - description: Manage stopwords sets - externalDocs: - description: Find out more - url: https://typesense.org/docs/28.0/api/stopwords.html - name: stopwords - - description: Store and reference search parameters - externalDocs: - description: Find out more - url: https://typesense.org/docs/28.0/api/search.html#presets - name: presets - - description: Conversational Search (RAG) - externalDocs: - description: Find out more - url: https://typesense.org/docs/28.0/api/conversational-search-rag.html - name: conversations - - description: Manage synonyms - externalDocs: - description: Find out more - url: https://typesense.org/docs/28.0/api/synonyms.html - name: synonyms - - description: Manage stemming dictionaries - externalDocs: - description: Find out more - url: https://typesense.org/docs/28.0/api/stemming.html - name: stemming + description: Stop sequences for the NL model (Google-specific) + api_version: + type: string + description: API version for the NL model service + project_id: + type: string + description: Project ID for GCP Vertex AI + access_token: + type: string + description: Access token for GCP Vertex AI + refresh_token: + type: string + description: Refresh token for GCP Vertex AI + client_id: + type: string + description: Client ID for GCP Vertex AI + client_secret: + type: string + description: Client secret for GCP Vertex AI + region: + type: string + description: Region for GCP Vertex AI + max_output_tokens: + type: integer + description: Maximum output tokens for GCP Vertex AI + account_id: + type: string + description: Account ID for Cloudflare-specific models + + NLSearchModelCreateSchema: + allOf: + - $ref: '#/components/schemas/NLSearchModelBase' + - type: object + properties: + id: + type: string + description: Optional ID for the NL search model + + NLSearchModelSchema: + allOf: + - $ref: '#/components/schemas/NLSearchModelCreateSchema' + - type: object + required: + - id + properties: + id: + type: string + description: ID of the NL search model + + NLSearchModelUpdateSchema: + $ref: '#/components/schemas/NLSearchModelCreateSchema' + + NLSearchModelDeleteSchema: + type: object + required: + - id + properties: + id: + type: string + description: ID of the deleted NL search model + + securitySchemes: + api_key_header: + type: apiKey + name: X-TYPESENSE-API-KEY + in: header diff --git a/preprocessed_openapi.yml b/preprocessed_openapi.yml new file mode 100644 index 0000000..b2740e7 --- /dev/null +++ b/preprocessed_openapi.yml @@ -0,0 +1,4481 @@ +openapi: 3.0.3 +info: + title: Typesense API + description: An open source search engine for building delightful search experiences. 
+ version: '28.0'
+externalDocs:
+ description: Find out more about Typesense
+ url: https://typesense.org
+security:
+- api_key_header: []
+tags:
+- name: collections
+ description: A collection is defined by a schema
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/api/#create-collection
+- name: documents
+ description: A document is an individual record to be indexed and belongs to a collection
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/api/#index-document
+- name: curation
+ description: Hand-curate search results based on conditional business rules
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/0.23.0/api/#curation
+- name: analytics
+ description: Typesense can aggregate search queries for both analytics purposes and for query suggestions.
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/28.0/api/analytics-query-suggestions.html
+- name: keys
+ description: Manage API Keys with fine-grain access control
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/0.23.0/api/#api-keys
+- name: debug
+ description: Debugging information
+- name: operations
+ description: Manage Typesense cluster
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/28.0/api/cluster-operations.html
+- name: stopwords
+ description: Manage stopwords sets
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/28.0/api/stopwords.html
+- name: presets
+ description: Store and reference search parameters
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/28.0/api/search.html#presets
+- name: conversations
+ description: Conversational Search (RAG)
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/28.0/api/conversational-search-rag.html
+- name: synonyms
+ description: Manage synonyms
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/28.0/api/synonyms.html
+- name: stemming
+ description: Manage stemming dictionaries
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/28.0/api/stemming.html
+- name: nl_search_models
+ description: Manage NL search models
+ externalDocs:
+ description: Find out more
+ url: https://typesense.org/docs/29.0/api/natural-language-search.html
+paths:
+ /collections:
+ get:
+ tags:
+ - collections
+ summary: List all collections
+ description: Returns a summary of all your collections. The collections are returned sorted by creation date, with the most recent collections appearing first.
+ operationId: getCollections
+ responses:
+ '200':
+ description: List of all collections
+ content:
+ application/json:
+ schema:
+ type: array
+ x-go-type: '[]*CollectionResponse'
+ items:
+ $ref: '#/components/schemas/CollectionResponse'
+ post:
+ tags:
+ - collections
+ summary: Create a new collection
+ description: When a collection is created, we give it a name and describe the fields that will be indexed from the documents added to the collection.
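+ # NOTE (illustrative sketch, not part of the generated spec): a typical create-collection
+ # request for this endpoint, assuming the usual CollectionSchema shape (name, fields,
+ # optional default_sorting_field). Host/port, collection and field names are placeholders.
+ #
+ #   curl -X POST 'http://localhost:8108/collections' \
+ #     -H 'X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}' \
+ #     -d '{
+ #           "name": "companies",
+ #           "fields": [
+ #             {"name": "company_name", "type": "string"},
+ #             {"name": "num_employees", "type": "int32"},
+ #             {"name": "country", "type": "string", "facet": true}
+ #           ],
+ #           "default_sorting_field": "num_employees"
+ #         }'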
+ operationId: createCollection + requestBody: + description: The collection object to be created + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionSchema' + required: true + responses: + '201': + description: Collection successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionResponse' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + '409': + description: Collection already exists + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /collections/{collectionName}: + get: + tags: + - collections + summary: Retrieve a single collection + description: Retrieve the details of a collection, given its name. + operationId: getCollection + parameters: + - name: collectionName + in: path + description: The name of the collection to retrieve + required: true + schema: + type: string + responses: + '200': + description: Collection fetched + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionResponse' + '404': + description: Collection not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + patch: + tags: + - collections + summary: Update a collection + description: Update a collection's schema to modify the fields and their types. + operationId: updateCollection + parameters: + - name: collectionName + in: path + description: The name of the collection to update + required: true + schema: + type: string + requestBody: + description: The document object with fields to be updated + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionUpdateSchema' + required: true + responses: + '200': + description: The updated partial collection schema + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionUpdateSchema' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + '404': + description: The collection was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - collections + summary: Delete a collection + description: Permanently drops a collection. This action cannot be undone. For large collections, this might have an impact on read latencies. + operationId: deleteCollection + parameters: + - name: collectionName + in: path + description: The name of the collection to delete + required: true + schema: + type: string + responses: + '200': + description: Collection deleted + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionResponse' + '404': + description: Collection not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /collections/{collectionName}/documents: + post: + tags: + - documents + summary: Index a document + description: A document to be indexed in a given collection must conform to the schema of the collection. 
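+ # NOTE (illustrative sketch, not part of the generated spec): indexing a single document;
+ # `action=upsert` (one of the IndexAction values) creates the document or replaces an
+ # existing one with the same id. Host/port and field values are placeholders.
+ #
+ #   curl -X POST 'http://localhost:8108/collections/companies/documents?action=upsert' \
+ #     -H 'X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}' \
+ #     -d '{"id": "124", "company_name": "Stark Industries", "num_employees": 5215, "country": "US"}'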
+ operationId: indexDocument
+ parameters:
+ - name: collectionName
+ in: path
+ description: The name of the collection to add the document to
+ required: true
+ schema:
+ type: string
+ - name: action
+ in: query
+ description: Additional action to perform
+ schema:
+ type: string
+ example: upsert
+ $ref: '#/components/schemas/IndexAction'
+ - name: dirty_values
+ in: query
+ description: Dealing with Dirty Data
+ schema:
+ $ref: '#/components/schemas/DirtyValues'
+ requestBody:
+ description: The document object to be indexed
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Can be any key-value pair
+ x-go-type: interface{}
+ required: true
+ responses:
+ '201':
+ description: Document successfully created/indexed
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Can be any key-value pair
+ '404':
+ description: Collection not found
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ApiResponse'
+ patch:
+ tags:
+ - documents
+ summary: Update documents with conditional query
+ description: The filter_by query parameter is used to specify a condition against which the documents are matched. The request body contains the fields that should be updated for any documents that match the filter condition. This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later.
+ operationId: updateDocuments
+ parameters:
+ - name: collectionName
+ in: path
+ description: The name of the collection to update documents in
+ required: true
+ schema:
+ type: string
+ - name: filter_by
+ in: query
+ schema:
+ type: string
+ example: 'num_employees:>100 && country: [USA, UK]'
+ responses:
+ '200':
+ description: The response contains a single field, `num_updated`, indicating the number of documents affected.
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - num_updated
+ properties:
+ num_updated:
+ type: integer
+ description: The number of documents that have been updated
+ example: 1
+ '400':
+ description: Bad request, see error message for details
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ApiResponse'
+ '404':
+ description: The collection was not found
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ApiResponse'
+ requestBody:
+ description: The document fields to be updated
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Can be any key-value pair
+ x-go-type: interface{}
+ required: true
+ delete:
+ tags:
+ - documents
+ summary: Delete a bunch of documents
+ description: Delete a bunch of documents that match a specific filter condition. Use the `batch_size` parameter to control the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server.
+ operationId: deleteDocuments
+ parameters:
+ - name: collectionName
+ in: path
+ description: The name of the collection to delete documents from
+ required: true
+ schema:
+ type: string
+ - name: filter_by
+ in: query
+ schema:
+ type: string
+ example: 'num_employees:>100 && country: [USA, UK]'
+ - name: batch_size
+ in: query
+ schema:
+ description: Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server.
+ type: integer + - name: ignore_not_found + in: query + schema: + type: boolean + - name: truncate + in: query + schema: + description: When true, removes all documents from the collection while preserving the collection and its schema. + type: boolean + responses: + '200': + description: Documents successfully deleted + content: + application/json: + schema: + type: object + required: + - num_deleted + properties: + num_deleted: + type: integer + '404': + description: Collection not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /collections/{collectionName}/documents/search: + get: + tags: + - documents + summary: Search for documents in a collection + description: Search for documents in a collection that match the search criteria. + operationId: searchCollection + parameters: + - name: collectionName + in: path + description: The name of the collection to search for the document under + required: true + schema: + type: string + - name: q + in: query + schema: + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + type: string + - name: query_by + in: query + schema: + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + type: string + - name: nl_query + in: query + schema: + description: Whether to use natural language processing to parse the query. + type: boolean + - name: nl_model_id + in: query + schema: + description: The ID of the natural language model to use. + type: string + - name: query_by_weights + in: query + schema: + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + type: string + - name: text_match_type + in: query + schema: + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + type: string + - name: prefix + in: query + schema: + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + type: string + - name: infix + in: query + schema: + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results + type: string + - name: max_extra_prefix + in: query + schema: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. 
+ type: integer + - name: max_extra_suffix + in: query + schema: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + - name: filter_by + in: query + schema: + description: Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. + type: string + example: 'num_employees:>100 && country: [USA, UK]' + - name: max_filter_by_candidates + in: query + schema: + description: Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. + type: integer + - name: sort_by + in: query + schema: + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + type: string + example: num_employees:desc + - name: facet_by + in: query + schema: + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + type: string + - name: max_facet_values + in: query + schema: + description: Maximum number of facet values to be returned. + type: integer + - name: facet_query + in: query + schema: + description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". + type: string + - name: num_typos + in: query + schema: + description: | + The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + type: string + - name: page + in: query + schema: + description: Results from this specific page number would be fetched. + type: integer + - name: per_page + in: query + schema: + description: 'Number of results to fetch per page. Default: 10' + type: integer + - name: limit + in: query + schema: + description: | + Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + type: integer + - name: offset + in: query + schema: + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + type: integer + - name: group_by + in: query + schema: + description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + type: string + - name: group_limit + in: query + schema: + description: | + Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + type: integer + - name: group_missing_values + in: query + schema: + description: | + Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. 
Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + type: boolean + - name: include_fields + in: query + schema: + description: List of fields from the document to include in the search result + type: string + - name: exclude_fields + in: query + schema: + description: List of fields from the document to exclude in the search result + type: string + - name: highlight_full_fields + in: query + schema: + description: List of fields which should be highlighted fully without snippeting + type: string + - name: highlight_affix_num_tokens + in: query + schema: + description: | + The number of tokens that should surround the highlighted text on each side. Default: 4 + type: integer + - name: highlight_start_tag + in: query + schema: + description: | + The start tag used for the highlighted snippets. Default: `` + type: string + - name: highlight_end_tag + in: query + schema: + description: | + The end tag used for the highlighted snippets. Default: `` + type: string + - name: enable_highlight_v1 + in: query + schema: + description: | + Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true + type: boolean + default: true + - name: snippet_threshold + in: query + schema: + description: | + Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + type: integer + - name: drop_tokens_threshold + in: query + schema: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + type: integer + - name: drop_tokens_mode + in: query + schema: + $ref: '#/components/schemas/DropTokensMode' + - name: typo_tokens_threshold + in: query + schema: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + type: integer + - name: enable_typos_for_alpha_numerical_tokens + in: query + schema: + type: boolean + description: | + Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + - name: filter_curated_hits + in: query + schema: + type: boolean + description: | + Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + - name: enable_synonyms + in: query + schema: + type: boolean + description: | + If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + - name: synonym_prefix + in: query + schema: + type: boolean + description: | + Allow synonym resolution on word prefixes in the query. Default: false + - name: synonym_num_typos + in: query + schema: + type: integer + description: | + Allow synonym resolution on typo-corrected words in the query. Default: 0 + - name: pinned_hits + in: query + schema: + description: | + A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. 
Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + - name: hidden_hits + in: query + schema: + description: | + A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + - name: override_tags + in: query + schema: + description: Comma separated list of tags to trigger the curations rules that match the tags. + type: string + - name: highlight_fields + in: query + schema: + description: | + A list of custom fields that must be highlighted even if you don't query for them + type: string + - name: split_join_tokens + in: query + schema: + description: | + Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. + type: string + - name: pre_segmented_query + in: query + schema: + description: | + You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. + Set this parameter to true to do the same + type: boolean + - name: preset + in: query + schema: + description: | + Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + type: string + - name: enable_overrides + in: query + schema: + description: | + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + type: boolean + default: false + - name: prioritize_exact_match + in: query + schema: + description: | + Set this parameter to true to ensure that an exact match is ranked above the others + type: boolean + default: true + - name: max_candidates + in: query + schema: + description: | + Control the number of words that Typesense considers for typo and prefix searching. + type: integer + - name: prioritize_token_position + in: query + schema: + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. + type: boolean + default: false + - name: prioritize_num_matching_fields + in: query + schema: + description: | + Make Typesense prioritize documents where the query words appear in more number of fields. + type: boolean + default: true + - name: enable_typos_for_numerical_tokens + in: query + schema: + description: | + Make Typesense disable typos for numerical tokens. + type: boolean + default: true + - name: exhaustive_search + in: query + schema: + description: | + Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + type: boolean + - name: search_cutoff_ms + in: query + schema: + description: | + Typesense will attempt to return results early if the cutoff time has elapsed. 
This is not a strict guarantee and facet computation is not bound by this parameter. + type: integer + - name: use_cache + in: query + schema: + description: | + Enable server side caching of search query results. By default, caching is disabled. + type: boolean + - name: cache_ttl + in: query + schema: + description: | + The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + type: integer + - name: min_len_1typo + in: query + schema: + description: | + Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + - name: min_len_2typo + in: query + schema: + description: | + Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + - name: vector_query + in: query + schema: + description: | + Vector query expression for fetching documents "closest" to a given query/document vector. + type: string + - name: remote_embedding_timeout_ms + in: query + schema: + description: | + Timeout (in milliseconds) for fetching remote embeddings. + type: integer + - name: remote_embedding_num_tries + in: query + schema: + description: | + Number of times to retry fetching remote embeddings. + type: integer + - name: facet_strategy + in: query + schema: + description: | + Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + type: string + - name: stopwords + in: query + schema: + description: | + Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + type: string + - name: facet_return_parent + in: query + schema: + description: | + Comma separated string of nested facet fields whose parent object should be returned in facet response. + type: string + - name: voice_query + in: query + schema: + description: | + The base64 encoded audio file in 16 khz 16-bit WAV format. + type: string + - name: conversation + in: query + schema: + description: | + Enable conversational search. + type: boolean + - name: conversation_model_id + in: query + schema: + description: | + The Id of Conversation Model to be used. + type: string + - name: conversation_id + in: query + schema: + description: | + The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. 
+ type: string + responses: + '200': + description: Search results + content: + application/json: + schema: + $ref: '#/components/schemas/SearchResult' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + '404': + description: The collection or field was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /collections/{collectionName}/overrides: + get: + tags: + - documents + - curation + summary: List all collection overrides + operationId: getSearchOverrides + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + responses: + '200': + description: List of all search overrides + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverridesResponse' + /collections/{collectionName}/overrides/{overrideId}: + get: + tags: + - documents + - override + summary: Retrieve a single search override + description: Retrieve the details of a search override, given its id. + operationId: getSearchOverride + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: overrideId + in: path + description: The id of the search override + required: true + schema: + type: string + responses: + '200': + description: Search override fetched + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverride' + put: + tags: + - documents + - curation + summary: Create or update an override to promote certain documents over others + description: Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. 
+ operationId: upsertSearchOverride + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: overrideId + in: path + description: The ID of the search override to create/update + required: true + schema: + type: string + requestBody: + description: The search override object to be created/updated + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverrideSchema' + required: true + responses: + '200': + description: Created/updated search override + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverride' + '404': + description: Search override not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - documents + - curation + summary: Delete an override associated with a collection + operationId: deleteSearchOverride + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: overrideId + in: path + description: The ID of the search override to delete + required: true + schema: + type: string + responses: + '200': + description: The ID of the deleted search override + content: + application/json: + schema: + $ref: '#/components/schemas/SearchOverrideDeleteResponse' + '404': + description: Search override not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /collections/{collectionName}/synonyms: + get: + tags: + - synonyms + summary: List all collection synonyms + operationId: getSearchSynonyms + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + responses: + '200': + description: List of all search synonyms + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonymsResponse' + '404': + description: Search synonyms was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /collections/{collectionName}/synonyms/{synonymId}: + get: + tags: + - synonyms + summary: Retrieve a single search synonym + description: Retrieve the details of a search synonym, given its id. + operationId: getSearchSynonym + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: synonymId + in: path + description: The id of the search synonym + required: true + schema: + type: string + responses: + '200': + description: Search synonym fetched + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonym' + '404': + description: Search synonym was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + put: + tags: + - synonyms + summary: Create or update a synonym + description: Create or update a synonym to define search terms that should be considered equivalent. 
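+ # NOTE (illustrative sketch, not part of the generated spec): upserting a multi-way synonym,
+ # assuming SearchSynonymSchema's required `synonyms` array. Host/port, the synonym id and
+ # the terms are placeholders.
+ #
+ #   curl -X PUT 'http://localhost:8108/collections/products/synonyms/coat-synonyms' \
+ #     -H 'X-TYPESENSE-API-KEY: ${TYPESENSE_API_KEY}' \
+ #     -d '{"synonyms": ["blazer", "coat", "jacket"]}'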
+ operationId: upsertSearchSynonym + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: synonymId + in: path + description: The ID of the search synonym to create/update + required: true + schema: + type: string + requestBody: + description: The search synonym object to be created/updated + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonymSchema' + required: true + responses: + '200': + description: Created/updated search synonym + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonym' + '404': + description: Search synonym was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - synonyms + summary: Delete a synonym associated with a collection + operationId: deleteSearchSynonym + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: synonymId + in: path + description: The ID of the search synonym to delete + required: true + schema: + type: string + responses: + '200': + description: The ID of the deleted search synonym + content: + application/json: + schema: + $ref: '#/components/schemas/SearchSynonymDeleteResponse' + '404': + description: Search synonym not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /collections/{collectionName}/documents/export: + get: + tags: + - documents + summary: Export all documents in a collection + description: Export all documents in a collection in JSON lines format. + operationId: exportDocuments + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: filter_by + in: query + schema: + description: Filter conditions for refining your search results. Separate multiple conditions with &&. + type: string + - name: include_fields + in: query + schema: + description: List of fields from the document to include in the search result + type: string + - name: exclude_fields + in: query + schema: + description: List of fields from the document to exclude in the search result + type: string + responses: + '200': + description: Exports all the documents in a given collection. + content: + application/octet-stream: + schema: + type: string + example: | + {"id": "124", "company_name": "Stark Industries", "num_employees": 5215, "country": "US"} + {"id": "125", "company_name": "Future Technology", "num_employees": 1232,"country": "UK"} + {"id": "126", "company_name": "Random Corp.", "num_employees": 531,"country": "AU"} + '404': + description: The collection was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /collections/{collectionName}/documents/import: + post: + tags: + - documents + summary: Import documents into a collection + description: The documents to be imported must be formatted in a newline delimited JSON structure. You can feed the output file from a Typesense export operation directly as import. + operationId: importDocuments + parameters: + - name: collectionName + in: path + description: The name of the collection + required: true + schema: + type: string + - name: batch_size + in: query + schema: + type: integer + - name: return_id + in: query + schema: + type: boolean + description: Returning the id of the imported documents. 
If you want the import response to return the ingested document's id in the response, you can use the return_id parameter. + - name: remote_embedding_batch_size + in: query + schema: + type: integer + - name: return_doc + in: query + schema: + type: boolean + - name: action + in: query + schema: + $ref: '#/components/schemas/IndexAction' + - name: dirty_values + in: query + schema: + $ref: '#/components/schemas/DirtyValues' + requestBody: + description: The JSON array of documents or the JSONL file to import + content: + application/octet-stream: + schema: + type: string + description: The JSONL file to import + required: true + responses: + '200': + description: Result of the import operation. Each line of the response indicates the result of each document present in the request body (in the same order). If the import of a single document fails, it does not affect the other documents. If there is a failure, the response line will include a corresponding error message as well as the actual document content. + content: + application/octet-stream: + schema: + type: string + example: | + {"success": true} + {"success": false, "error": "Bad JSON.", "document": "[bad doc"} + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + '404': + description: The collection was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /collections/{collectionName}/documents/{documentId}: + get: + tags: + - documents + summary: Retrieve a document + description: Fetch an individual document from a collection by using its ID. + operationId: getDocument + parameters: + - name: collectionName + in: path + description: The name of the collection to search for the document under + required: true + schema: + type: string + - name: documentId + in: path + description: The Document ID + required: true + schema: + type: string + responses: + '200': + description: The document referenced by the ID + content: + application/json: + schema: + type: object + description: Can be any key-value pair + '404': + description: The document or collection was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + patch: + tags: + - documents + summary: Update a document + description: Update an individual document from a collection by using its ID. The update can be partial.
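+ # Because the update can be partial, sending only the changed fields, e.g. {"num_employees": 5500}, leaves the rest of the document untouched (illustrative example using the companies documents shown above).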
+ operationId: updateDocument + parameters: + - name: collectionName + in: path + description: The name of the collection to search for the document under + required: true + schema: + type: string + - name: documentId + in: path + description: The Document ID + required: true + schema: + type: string + - name: dirty_values + in: query + description: Dealing with Dirty Data + schema: + $ref: '#/components/schemas/DirtyValues' + requestBody: + description: The document object with fields to be updated + content: + application/json: + schema: + type: object + description: Can be any key-value pair + x-go-type: interface{} + required: true + responses: + '200': + description: The document referenced by the ID was updated + content: + application/json: + schema: + type: object + description: Can be any key-value pair + '404': + description: The document or collection was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - documents + summary: Delete a document + description: Delete an individual document from a collection by using its ID. + operationId: deleteDocument + parameters: + - name: collectionName + in: path + description: The name of the collection to search for the document under + required: true + schema: + type: string + - name: documentId + in: path + description: The Document ID + required: true + schema: + type: string + responses: + '200': + description: The document referenced by the ID was deleted + content: + application/json: + schema: + type: object + description: Can be any key-value pair + '404': + description: The document or collection was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /conversations/models: + get: + description: Retrieve all conversation models + operationId: retrieveAllConversationModels + responses: + '200': + content: + application/json: + schema: + items: + $ref: '#/components/schemas/ConversationModelSchema' + type: array + x-go-type: '[]*ConversationModelSchema' + description: List of all conversation models + summary: List all conversation models + tags: + - conversations + post: + description: Create a Conversation Model + operationId: createConversationModel + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelCreateSchema' + required: true + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelSchema' + description: Created Conversation Model + '400': + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + description: Bad request, see error message for details + tags: + - conversations + /conversations/models/{modelId}: + get: + description: Retrieve a conversation model + operationId: retrieveConversationModel + parameters: + - name: modelId + in: path + description: The id of the conversation model to retrieve + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelSchema' + description: A conversation model + summary: Retrieve a conversation model + tags: + - conversations + put: + description: Update a conversation model + operationId: updateConversationModel + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelUpdateSchema' + required: true + parameters: + - name: modelId + in: path + description: The id of the conversation model to update + 
required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelSchema' + description: The conversation model was successfully updated + summary: Update a conversation model + tags: + - conversations + delete: + description: Delete a conversation model + operationId: deleteConversationModel + parameters: + - name: modelId + in: path + description: The id of the conversation model to delete + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/ConversationModelSchema' + description: The conversation model was successfully deleted + summary: Delete a conversation model + tags: + - conversations + /keys: + get: + tags: + - keys + summary: Retrieve (metadata about) all keys. + operationId: getKeys + responses: + '200': + description: List of all keys + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeysResponse' + post: + tags: + - keys + summary: Create an API Key + description: Create an API Key with fine-grain access control. You can restrict access on both a per-collection and per-action level. The generated key is returned only during creation. You want to store this key carefully in a secure place. + operationId: createKey + requestBody: + description: The object that describes API key scope + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeySchema' + responses: + '201': + description: Created API key + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKey' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + '409': + description: API key generation conflict + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /keys/{keyId}: + get: + tags: + - keys + summary: Retrieve (metadata about) a key + description: Retrieve (metadata about) a key. Only the key prefix is returned when you retrieve a key. Due to security reasons, only the create endpoint returns the full API key. + operationId: getKey + parameters: + - name: keyId + in: path + description: The ID of the key to retrieve + required: true + schema: + type: integer + format: int64 + responses: + '200': + description: The key referenced by the ID + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKey' + '404': + description: The key was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - keys + summary: Delete an API key given its ID. + operationId: deleteKey + parameters: + - name: keyId + in: path + description: The ID of the key to delete + required: true + schema: + type: integer + format: int64 + responses: + '200': + description: The key referenced by the ID + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeyDeleteResponse' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + '404': + description: Key not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /aliases: + get: + tags: + - collections + summary: List all aliases + description: List all aliases and the corresponding collections that they map to. 
+ operationId: getAliases + responses: + '200': + description: List of all collection aliases + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAliasesResponse' + /aliases/{aliasName}: + put: + tags: + - collections + summary: Create or update a collection alias + description: Create or update a collection alias. An alias is a virtual collection name that points to a real collection. If you're familiar with symbolic links on Linux, it's very similar to that. Aliases are useful when you want to reindex your data in the background on a new collection and switch your application to it without any changes to your code. + operationId: upsertAlias + parameters: + - name: aliasName + in: path + description: The name of the alias to create/update + required: true + schema: + type: string + requestBody: + description: Collection alias to be created/updated + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAliasSchema' + responses: + '200': + description: The collection alias was created/updated + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAlias' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + '404': + description: Alias not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + get: + tags: + - collections + summary: Retrieve an alias + description: Find out which collection an alias points to by fetching it + operationId: getAlias + parameters: + - name: aliasName + in: path + description: The name of the alias to retrieve + required: true + schema: + type: string + responses: + '200': + description: Collection alias fetched + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAlias' + '404': + description: The alias was not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - collections + summary: Delete an alias + operationId: deleteAlias + parameters: + - name: aliasName + in: path + description: The name of the alias to delete + required: true + schema: + type: string + responses: + '200': + description: Collection alias was deleted + content: + application/json: + schema: + $ref: '#/components/schemas/CollectionAlias' + '404': + description: Alias not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /debug: + get: + tags: + - debug + summary: Print debugging information + description: Print debugging information + operationId: debug + responses: + '200': + description: Debugging information + content: + application/json: + schema: + type: object + properties: + version: + type: string + /health: + get: + tags: + - health + summary: Checks if Typesense server is ready to accept requests. + description: Checks if Typesense server is ready to accept requests. + operationId: health + responses: + '200': + description: Search service is ready for requests. + content: + application/json: + schema: + $ref: '#/components/schemas/HealthStatus' + /operations/schema_changes: + get: + tags: + - operations + summary: Get the status of in-progress schema change operations + description: Returns the status of any ongoing schema change operations. If no schema changes are in progress, returns an empty response. 
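+ # Illustrative response, using the SchemaChangeStatus fields defined under components below: + # [{"collection": "companies", "validated_docs": 93100, "altered_docs": 93100}]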
+ operationId: getSchemaChanges + responses: + '200': + description: List of schema changes in progress + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/SchemaChangeStatus' + /operations/snapshot: + post: + tags: + - operations + summary: Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. + description: Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. You can then backup the snapshot directory that gets created and later restore it as a data directory, as needed. + operationId: takeSnapshot + parameters: + - name: snapshot_path + in: query + description: The directory on the server where the snapshot should be saved. + required: true + schema: + type: string + responses: + '201': + description: Snapshot is created. + content: + application/json: + schema: + $ref: '#/components/schemas/SuccessStatus' + /operations/vote: + post: + tags: + - operations + summary: Triggers a follower node to initiate the raft voting process, which triggers leader re-election. + description: Triggers a follower node to initiate the raft voting process, which triggers leader re-election. The follower node that you run this operation against will become the new leader, once this command succeeds. + operationId: vote + responses: + '200': + description: Re-election is performed. + content: + application/json: + schema: + $ref: '#/components/schemas/SuccessStatus' + /multi_search: + post: + operationId: multiSearch + tags: + - documents + summary: send multiple search requests in a single HTTP request + description: This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. + parameters: + - name: q + in: query + schema: + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + type: string + - name: query_by + in: query + schema: + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + type: string + - name: query_by_weights + in: query + schema: + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + type: string + - name: text_match_type + in: query + schema: + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + type: string + - name: prefix + in: query + schema: + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + type: string + - name: infix + in: query + schema: + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. 
This parameter can have 3 values: `off` (infix search is disabled, which is the default), `always` (infix search is performed along with regular search), and `fallback` (infix search is performed if regular search does not produce results). + type: string + - name: max_extra_prefix + in: query + schema: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + - name: max_extra_suffix + in: query + schema: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + - name: filter_by + in: query + schema: + description: Filter conditions for refining your search results. Separate multiple conditions with &&. + type: string + example: 'num_employees:>100 && country: [USA, UK]' + - name: sort_by + in: query + schema: + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + type: string + - name: facet_by + in: query + schema: + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + type: string + - name: max_facet_values + in: query + schema: + description: Maximum number of facet values to be returned. + type: integer + - name: facet_query + in: query + schema: + description: Facet values that are returned can be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". + type: string + - name: num_typos + in: query + schema: + description: | + The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + type: string + - name: page + in: query + schema: + description: Results from this specific page number would be fetched. + type: integer + - name: per_page + in: query + schema: + description: 'Number of results to fetch per page. Default: 10' + type: integer + - name: limit + in: query + schema: + description: | + Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + type: integer + - name: offset + in: query + schema: + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + type: integer + - name: group_by + in: query + schema: + description: You can aggregate search results into groups or buckets by specifying one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field.
+ type: string + - name: group_limit + in: query + schema: + description: | + Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + type: integer + - name: group_missing_values + in: query + schema: + description: | + Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + type: boolean + - name: include_fields + in: query + schema: + description: List of fields from the document to include in the search result + type: string + - name: exclude_fields + in: query + schema: + description: List of fields from the document to exclude in the search result + type: string + - name: highlight_full_fields + in: query + schema: + description: List of fields which should be highlighted fully without snippeting + type: string + - name: highlight_affix_num_tokens + in: query + schema: + description: | + The number of tokens that should surround the highlighted text on each side. Default: 4 + type: integer + - name: highlight_start_tag + in: query + schema: + description: | + The start tag used for the highlighted snippets. Default: `` + type: string + - name: highlight_end_tag + in: query + schema: + description: | + The end tag used for the highlighted snippets. Default: `` + type: string + - name: snippet_threshold + in: query + schema: + description: | + Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + type: integer + - name: drop_tokens_threshold + in: query + schema: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + type: integer + - name: drop_tokens_mode + in: query + schema: + $ref: '#/components/schemas/DropTokensMode' + - name: typo_tokens_threshold + in: query + schema: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + type: integer + - name: enable_typos_for_alpha_numerical_tokens + in: query + schema: + type: boolean + description: | + Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + - name: filter_curated_hits + in: query + schema: + type: boolean + description: | + Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + - name: enable_synonyms + in: query + schema: + type: boolean + description: | + If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + - name: synonym_prefix + in: query + schema: + type: boolean + description: | + Allow synonym resolution on word prefixes in the query. Default: false + - name: synonym_num_typos + in: query + schema: + type: integer + description: | + Allow synonym resolution on typo-corrected words in the query. 
Default: 0 + - name: pinned_hits + in: query + schema: + description: | + A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + - name: hidden_hits + in: query + schema: + description: | + A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + - name: override_tags + in: query + schema: + description: Comma separated list of tags to trigger the curations rules that match the tags. + type: string + - name: highlight_fields + in: query + schema: + description: | + A list of custom fields that must be highlighted even if you don't query for them + type: string + - name: pre_segmented_query + in: query + schema: + description: | + You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. + Set this parameter to true to do the same + type: boolean + default: false + - name: preset + in: query + schema: + description: | + Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + type: string + - name: enable_overrides + in: query + schema: + description: | + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + type: boolean + default: false + - name: prioritize_exact_match + in: query + schema: + description: | + Set this parameter to true to ensure that an exact match is ranked above the others + type: boolean + default: true + - name: prioritize_token_position + in: query + schema: + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. + type: boolean + default: false + - name: prioritize_num_matching_fields + in: query + schema: + description: | + Make Typesense prioritize documents where the query words appear in more number of fields. + type: boolean + default: true + - name: enable_typos_for_numerical_tokens + in: query + schema: + description: | + Make Typesense disable typos for numerical tokens. + type: boolean + default: true + - name: exhaustive_search + in: query + schema: + description: | + Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + type: boolean + - name: search_cutoff_ms + in: query + schema: + description: | + Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. + type: integer + - name: use_cache + in: query + schema: + description: | + Enable server side caching of search query results. By default, caching is disabled. 
+ type: boolean + - name: cache_ttl + in: query + schema: + description: | + The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + type: integer + - name: min_len_1typo + in: query + schema: + description: | + Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + - name: min_len_2typo + in: query + schema: + description: | + Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + - name: vector_query + in: query + schema: + description: | + Vector query expression for fetching documents "closest" to a given query/document vector. + type: string + - name: remote_embedding_timeout_ms + in: query + schema: + description: | + Timeout (in milliseconds) for fetching remote embeddings. + type: integer + - name: remote_embedding_num_tries + in: query + schema: + description: | + Number of times to retry fetching remote embeddings. + type: integer + - name: facet_strategy + in: query + schema: + description: | + Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + type: string + - name: stopwords + in: query + schema: + description: | + Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + type: string + - name: facet_return_parent + in: query + schema: + description: | + Comma separated string of nested facet fields whose parent object should be returned in facet response. + type: string + - name: voice_query + in: query + schema: + description: | + The base64 encoded audio file in 16 khz 16-bit WAV format. + type: string + - name: conversation + in: query + schema: + description: | + Enable conversational search. + type: boolean + - name: conversation_model_id + in: query + schema: + description: | + The Id of Conversation Model to be used. + type: string + - name: conversation_id + in: query + schema: + description: | + The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MultiSearchSearchesParameter' + responses: + '200': + description: Search results + content: + application/json: + schema: + $ref: '#/components/schemas/MultiSearchResult' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /analytics/events: + post: + tags: + - analytics + summary: Create an analytics event + description: Sending events for analytics e.g rank search results based on popularity. 
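+ # Illustrative event payload (the exact fields are defined by AnalyticsEventCreateSchema): + # {"type": "click", "name": "products_click_event", "data": {"q": "nike shoes", "doc_id": "1024", "user_id": "111112"}}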
+ operationId: createAnalyticsEvent + requestBody: + description: The Analytics event to be created + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsEventCreateSchema' + required: true + responses: + '201': + description: Analytics event successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsEventCreateResponse' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /analytics/rules: + post: + tags: + - analytics + summary: Creates an analytics rule + description: When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection. + operationId: createAnalyticsRule + requestBody: + description: The Analytics rule to be created + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleSchema' + required: true + responses: + '201': + description: Analytics rule successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleSchema' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + get: + tags: + - analytics + summary: Retrieves all analytics rules + description: Retrieve the details of all analytics rules + operationId: retrieveAnalyticsRules + responses: + '200': + description: Analytics rules fetched + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRulesRetrieveSchema' + /analytics/rules/{ruleName}: + put: + tags: + - analytics + summary: Upserts an analytics rule + description: Upserts an analytics rule with the given name. 
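+ # Illustrative rule body (the exact fields are defined by AnalyticsRuleUpsertSchema): + # {"type": "popular_queries", "params": {"source": {"collections": ["products"]}, "destination": {"collection": "product_queries"}, "limit": 1000}}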
+ operationId: upsertAnalyticsRule + parameters: + - in: path + name: ruleName + description: The name of the analytics rule to upsert + schema: + type: string + required: true + requestBody: + description: The Analytics rule to be upserted + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleUpsertSchema' + required: true + responses: + '200': + description: Analytics rule successfully upserted + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleSchema' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + get: + tags: + - analytics + summary: Retrieves an analytics rule + description: Retrieve the details of an analytics rule, given its name + operationId: retrieveAnalyticsRule + parameters: + - in: path + name: ruleName + description: The name of the analytics rule to retrieve + schema: + type: string + required: true + responses: + '200': + description: Analytics rule fetched + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleSchema' + '404': + description: Analytics rule not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - analytics + summary: Delete an analytics rule + description: Permanently deletes an analytics rule, given its name + operationId: deleteAnalyticsRule + parameters: + - in: path + name: ruleName + description: The name of the analytics rule to delete + schema: + type: string + required: true + responses: + '200': + description: Analytics rule deleted + content: + application/json: + schema: + $ref: '#/components/schemas/AnalyticsRuleDeleteResponse' + '404': + description: Analytics rule not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /metrics.json: + get: + tags: + - operations + summary: Get current RAM, CPU, Disk & Network usage metrics. + description: Retrieve the metrics. + operationId: retrieveMetrics + responses: + '200': + description: Metrics fetched. + content: + application/json: + schema: + type: object + /stats.json: + get: + tags: + - operations + summary: Get stats about API endpoints. + description: Retrieve the stats about API endpoints. + operationId: retrieveAPIStats + responses: + '200': + description: Stats fetched. + content: + application/json: + schema: + $ref: '#/components/schemas/APIStatsResponse' + /stopwords: + get: + tags: + - stopwords + summary: Retrieves all stopwords sets. + description: Retrieve the details of all stopwords sets + operationId: retrieveStopwordsSets + responses: + '200': + description: Stopwords sets fetched. + content: + application/json: + schema: + $ref: '#/components/schemas/StopwordsSetsRetrieveAllSchema' + /stopwords/{setId}: + put: + tags: + - stopwords + summary: Upserts a stopwords set. + description: Create or update a stopwords set. The keywords present in the set are removed from any search query that references it. + operationId: upsertStopwordsSet + parameters: + - in: path + name: setId + description: The ID of the stopwords set to upsert. + schema: + type: string + required: true + example: countries + requestBody: + description: The stopwords set to upsert. + content: + application/json: + schema: + $ref: '#/components/schemas/StopwordsSetUpsertSchema' + required: true + responses: + '200': + description: Stopwords set successfully upserted.
+ content: + application/json: + schema: + $ref: '#/components/schemas/StopwordsSetSchema' + '400': + description: Bad request, see error message for details. + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + get: + tags: + - stopwords + summary: Retrieves a stopwords set. + description: Retrieve the details of a stopwords set, given its name. + operationId: retrieveStopwordsSet + parameters: + - in: path + name: setId + description: The ID of the stopwords set to retrieve. + schema: + type: string + required: true + example: countries + responses: + '200': + description: Stopwords set fetched. + content: + application/json: + schema: + $ref: '#/components/schemas/StopwordsSetRetrieveSchema' + '404': + description: Stopwords set not found. + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - stopwords + summary: Delete a stopwords set. + description: Permanently deletes a stopwords set, given its name. + operationId: deleteStopwordsSet + parameters: + - in: path + name: setId + description: The ID of the stopwords set to delete. + schema: + type: string + required: true + example: countries + responses: + '200': + description: Stopwords set deleted. + content: + application/json: + schema: + type: object + properties: + id: + type: string + required: + - id + example: | + {"id": "countries"} + '404': + description: Stopwords set not found. + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /presets: + get: + tags: + - presets + summary: Retrieves all presets. + description: Retrieve the details of all presets + operationId: retrieveAllPresets + responses: + '200': + description: Presets fetched. + content: + application/json: + schema: + $ref: '#/components/schemas/PresetsRetrieveSchema' + /presets/{presetId}: + get: + tags: + - presets + summary: Retrieves a preset. + description: Retrieve the details of a preset, given its name. + operationId: retrievePreset + parameters: + - in: path + name: presetId + description: The ID of the preset to retrieve. + schema: + type: string + required: true + example: listing_view + responses: + '200': + description: Preset fetched. + content: + application/json: + schema: + $ref: '#/components/schemas/PresetSchema' + '404': + description: Preset not found. + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + put: + tags: + - presets + summary: Upserts a preset. + description: Create or update an existing preset. + operationId: upsertPreset + parameters: + - in: path + name: presetId + description: The name of the preset to upsert. + schema: + type: string + required: true + example: listing_view + requestBody: + description: The preset to upsert. + content: + application/json: + schema: + $ref: '#/components/schemas/PresetUpsertSchema' + required: true + responses: + '200': + description: Preset successfully upserted. + content: + application/json: + schema: + $ref: '#/components/schemas/PresetSchema' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - presets + summary: Delete a preset. + description: Permanently deletes a preset, given its name. + operationId: deletePreset + parameters: + - in: path + name: presetId + description: The ID of the preset to delete.
+ schema: + type: string + required: true + example: listing_view + responses: + '200': + description: Preset deleted. + content: + application/json: + schema: + $ref: '#/components/schemas/PresetDeleteSchema' + '404': + description: Preset not found. + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /stemming/dictionaries: + get: + tags: + - stemming + summary: List all stemming dictionaries + description: Retrieve a list of all available stemming dictionaries. + operationId: listStemmingDictionaries + responses: + '200': + description: List of all dictionaries + content: + application/json: + schema: + type: object + properties: + dictionaries: + type: array + items: + type: string + example: + - irregular-plurals + - company-terms + /stemming/dictionaries/{dictionaryId}: + get: + tags: + - stemming + summary: Retrieve a stemming dictionary + description: Fetch details of a specific stemming dictionary. + operationId: getStemmingDictionary + parameters: + - name: dictionaryId + in: path + description: The ID of the dictionary to retrieve + required: true + schema: + type: string + example: irregular-plurals + responses: + '200': + description: Stemming dictionary details + content: + application/json: + schema: + $ref: '#/components/schemas/StemmingDictionary' + '404': + description: Dictionary not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /stemming/dictionaries/import: + post: + tags: + - stemming + summary: Import a stemming dictionary + description: Upload a JSONL file containing word mappings to create or update a stemming dictionary. + operationId: importStemmingDictionary + parameters: + - name: id + in: query + description: The ID to assign to the dictionary + required: true + schema: + type: string + example: irregular-plurals + requestBody: + description: The JSONL file containing word mappings + required: true + content: + application/json: + schema: + type: string + example: | + {"word": "people", "root": "person"} + {"word": "children", "root": "child"} + responses: + '200': + description: Dictionary successfully imported + content: + application/octet-stream: + schema: + type: string + example: | + {"word": "people", "root": "person"} {"word": "children", "root": "child"} + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /nl_search_models: + get: + tags: + - nl_search_models + summary: List all NL search models + description: Retrieve all NL search models. + operationId: retrieveAllNLSearchModels + responses: + '200': + description: List of all NL search models + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/NLSearchModelSchema' + post: + tags: + - nl_search_models + summary: Create a NL search model + description: Create a new NL search model. 
+ operationId: createNLSearchModel + requestBody: + description: The NL search model to be created + content: + application/json: + schema: + $ref: '#/components/schemas/NLSearchModelCreateSchema' + required: true + responses: + '201': + description: NL search model successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/NLSearchModelSchema' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + /nl_search_models/{modelId}: + get: + tags: + - nl_search_models + summary: Retrieve a NL search model + description: Retrieve a specific NL search model by its ID. + operationId: retrieveNLSearchModel + parameters: + - name: modelId + in: path + description: The ID of the NL search model to retrieve + required: true + schema: + type: string + responses: + '200': + description: NL search model fetched + content: + application/json: + schema: + $ref: '#/components/schemas/NLSearchModelSchema' + '404': + description: NL search model not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + put: + tags: + - nl_search_models + summary: Update a NL search model + description: Update an existing NL search model. + operationId: updateNLSearchModel + parameters: + - name: modelId + in: path + description: The ID of the NL search model to update + required: true + schema: + type: string + requestBody: + description: The NL search model fields to update + content: + application/json: + schema: + $ref: '#/components/schemas/NLSearchModelUpdateSchema' + required: true + responses: + '200': + description: NL search model successfully updated + content: + application/json: + schema: + $ref: '#/components/schemas/NLSearchModelSchema' + '400': + description: Bad request, see error message for details + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + '404': + description: NL search model not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' + delete: + tags: + - nl_search_models + summary: Delete a NL search model + description: Delete a specific NL search model by its ID. + operationId: deleteNLSearchModel + parameters: + - name: modelId + in: path + description: The ID of the NL search model to delete + required: true + schema: + type: string + responses: + '200': + description: NL search model successfully deleted + content: + application/json: + schema: + $ref: '#/components/schemas/NLSearchModelDeleteSchema' + '404': + description: NL search model not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiResponse' +components: + schemas: + CollectionSchema: + required: + - name + - fields + type: object + properties: + name: + type: string + description: Name of the collection + example: companies + fields: + type: array + description: A list of fields for querying, filtering and faceting + example: + - name: num_employees + type: int32 + facet: false + - name: company_name + type: string + facet: false + - name: country + type: string + facet: true + items: + $ref: '#/components/schemas/Field' + default_sorting_field: + type: string + description: The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. 
+ example: num_employees + default: '' + token_separators: + type: array + description: | + List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. + items: + type: string + minLength: 1 + maxLength: 1 + default: [] + enable_nested_fields: + type: boolean + description: Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. + default: false + example: true + symbols_to_index: + type: array + description: | + List of symbols or special characters to be indexed. + items: + type: string + minLength: 1 + maxLength: 1 + default: [] + voice_query_model: + $ref: '#/components/schemas/VoiceQueryModelCollectionConfig' + CollectionUpdateSchema: + required: + - fields + type: object + properties: + fields: + type: array + description: A list of fields for querying, filtering and faceting + example: + - name: company_name + type: string + facet: false + - name: num_employees + type: int32 + facet: false + - name: country + type: string + facet: true + items: + $ref: '#/components/schemas/Field' + CollectionResponse: + allOf: + - $ref: '#/components/schemas/CollectionSchema' + - type: object + required: + - num_documents + - created_at + properties: + num_documents: + type: integer + description: Number of documents in the collection + format: int64 + readOnly: true + created_at: + type: integer + description: Timestamp of when the collection was created (Unix epoch in seconds) + format: int64 + readOnly: true + Field: + required: + - name + - type + type: object + properties: + name: + type: string + example: company_name + type: + type: string + example: string + optional: + type: boolean + example: true + facet: + type: boolean + example: false + index: + type: boolean + example: true + default: true + locale: + type: string + example: el + sort: + type: boolean + example: true + infix: + type: boolean + example: true + default: false + reference: + type: string + description: | + Name of a field in another collection that should be linked to this collection so that it can be joined during query. + num_dim: + type: integer + example: 256 + drop: + type: boolean + example: true + store: + type: boolean + description: | + When set to false, the field value will not be stored on disk. Default: true. + vec_dist: + type: string + description: | + The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. + range_index: + type: boolean + description: | + Enables an index optimized for range filtering on numerical fields (e.g. rating:>3.5). Default: false. + stem: + type: boolean + description: | + Values are stemmed before indexing in-memory. Default: false. + stem_dictionary: + type: string + description: Name of the stemming dictionary to use for this field + example: irregular-plurals + token_separators: + type: array + description: | + List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. + items: + type: string + minLength: 1 + maxLength: 1 + default: [] + symbols_to_index: + type: array + description: | + List of symbols or special characters to be indexed. 
+ items: + type: string + minLength: 1 + maxLength: 1 + default: [] + embed: + type: object + required: + - from + - model_config + properties: + from: + type: array + items: + type: string + model_config: + type: object + required: + - model_name + properties: + model_name: + type: string + api_key: + type: string + url: + type: string + access_token: + type: string + refresh_token: + type: string + client_id: + type: string + client_secret: + type: string + project_id: + type: string + indexing_prefix: + type: string + query_prefix: + type: string + VoiceQueryModelCollectionConfig: + type: object + description: | + Configuration for the voice query model + properties: + model_name: + type: string + example: ts/whisper/base.en + CollectionAliasSchema: + type: object + required: + - collection_name + properties: + collection_name: + type: string + description: Name of the collection you wish to map the alias to + CollectionAlias: + type: object + required: + - collection_name + - name + properties: + name: + type: string + readOnly: true + description: Name of the collection alias + collection_name: + type: string + description: Name of the collection the alias mapped to + CollectionAliasesResponse: + type: object + required: + - aliases + properties: + aliases: + type: array + x-go-type: '[]*CollectionAlias' + items: + $ref: '#/components/schemas/CollectionAlias' + SearchResult: + type: object + properties: + facet_counts: + type: array + items: + $ref: '#/components/schemas/FacetCounts' + found: + type: integer + description: The number of documents found + found_docs: + type: integer + search_time_ms: + type: integer + description: The number of milliseconds the search took + out_of: + type: integer + description: The total number of documents in the collection + search_cutoff: + type: boolean + description: Whether the search was cut off + page: + type: integer + description: The search result page number + grouped_hits: + type: array + items: + $ref: '#/components/schemas/SearchGroupedHit' + hits: + type: array + description: The documents that matched the search query + items: + $ref: '#/components/schemas/SearchResultHit' + request_params: + type: object + required: + - collection_name + - q + - per_page + properties: + collection_name: + type: string + q: + type: string + per_page: + type: integer + voice_query: + type: object + properties: + transcribed_query: + type: string + conversation: + $ref: '#/components/schemas/SearchResultConversation' + SearchResultConversation: + type: object + required: + - answer + - conversation_history + - conversation_id + - query + properties: + answer: + type: string + conversation_history: + type: array + items: + type: object + conversation_id: + type: string + query: + type: string + SearchGroupedHit: + type: object + required: + - group_key + - hits + properties: + found: + type: integer + group_key: + type: array + items: {} + hits: + type: array + description: The documents that matched the search query + items: + $ref: '#/components/schemas/SearchResultHit' + SearchResultHit: + type: object + properties: + highlights: + type: array + description: (Deprecated) Contains highlighted portions of the search fields + items: + $ref: '#/components/schemas/SearchHighlight' + highlight: + type: object + description: Highlighted version of the matching document + additionalProperties: true + document: + type: object + description: Can be any key-value pair + text_match: + type: integer + format: int64 + text_match_info: + type: object + properties: 
+ best_field_score: + type: string + best_field_weight: + type: integer + fields_matched: + type: integer + num_tokens_dropped: + type: integer + format: int64 + x-go-type: uint64 + score: + type: string + tokens_matched: + type: integer + typo_prefix_score: + type: integer + geo_distance_meters: + type: object + description: Can be any key-value pair + additionalProperties: + type: integer + vector_distance: + type: number + format: float + description: Distance between the query vector and matching document's vector value + example: + highlights: + company_name: + field: company_name + snippet: Stark Industries + document: + id: '124' + company_name: Stark Industries + num_employees: 5215 + country: USA + text_match: 1234556 + SearchHighlight: + type: object + properties: + field: + type: string + example: company_name + snippet: + type: string + description: Present only for (non-array) string fields + example: Stark Industries + snippets: + type: array + description: Present only for (array) string[] fields + example: + - Stark Industries + - Stark Corp + items: + type: string + value: + type: string + description: Full field value with highlighting, present only for (non-array) string fields + example: Stark Industries is a major supplier of space equipment. + values: + type: array + description: Full field value with highlighting, present only for (array) string[] fields + example: + - Stark Industries + - Stark Corp + items: + type: string + indices: + type: array + description: The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field + example: 1 + items: + type: integer + matched_tokens: + type: array + items: + type: object + x-go-type: interface{} + SearchOverrideSchema: + type: object + required: + - rule + properties: + rule: + $ref: '#/components/schemas/SearchOverrideRule' + includes: + type: array + description: List of document `id`s that should be included in the search results with their corresponding `position`s. + items: + $ref: '#/components/schemas/SearchOverrideInclude' + excludes: + type: array + description: List of document `id`s that should be excluded from the search results. + items: + $ref: '#/components/schemas/SearchOverrideExclude' + filter_by: + type: string + description: | + A filter by clause that is applied to any search query that matches the override rule. + remove_matched_tokens: + type: boolean + description: | + Indicates whether search query tokens that exist in the override's rule should be removed from the search query. + metadata: + type: object + description: | + Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. + sort_by: + type: string + description: | + A sort by clause that is applied to any search query that matches the override rule. + replace_query: + type: string + description: | + Replaces the current search query with this value, when the search query matches the override rule. + filter_curated_hits: + type: boolean + description: | + When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. + effective_from_ts: + type: integer + description: | + A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. 
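+ # e.g. "effective_from_ts": 1687823400 makes the override active only from that Unix timestamp onwards (illustrative value).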
+ effective_to_ts: + type: integer + description: | + A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. + stop_processing: + type: boolean + description: | + When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. + SearchOverride: + allOf: + - $ref: '#/components/schemas/SearchOverrideSchema' + - type: object + required: + - id + properties: + id: + type: string + readOnly: true + SearchOverrideDeleteResponse: + type: object + required: + - id + properties: + id: + type: string + description: The id of the override that was deleted + SearchOverrideRule: + type: object + properties: + tags: + type: array + description: List of tag values to associate with this override rule. + items: + type: string + query: + type: string + description: Indicates what search queries should be overridden + match: + type: string + description: | + Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. + enum: + - exact + - contains + filter_by: + type: string + description: | + Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). + SearchOverrideInclude: + type: object + required: + - id + - position + properties: + id: + type: string + description: document id that should be included + position: + type: integer + description: position number where document should be included in the search results + SearchOverrideExclude: + type: object + required: + - id + properties: + id: + type: string + description: document id that should be excluded from the search results. + SearchOverridesResponse: + type: object + required: + - overrides + properties: + overrides: + type: array + x-go-type: '[]*SearchOverride' + items: + $ref: '#/components/schemas/SearchOverride' + SearchSynonymSchema: + type: object + required: + - synonyms + properties: + root: + type: string + description: For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. + synonyms: + type: array + description: Array of words that should be considered as synonyms. + items: + type: string + locale: + type: string + description: Locale for the synonym, leave blank to use the standard tokenizer. + symbols_to_index: + type: array + description: By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. 
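+ # e.g. setting "symbols_to_index": ["+"] keeps the + character, so a synonym such as "c++" is indexed as-is instead of having the symbol dropped (illustrative).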
+ items: + type: string + SearchSynonym: + allOf: + - $ref: '#/components/schemas/SearchSynonymSchema' + - type: object + required: + - id + properties: + id: + type: string + readOnly: true + SearchSynonymDeleteResponse: + type: object + required: + - id + properties: + id: + type: string + description: The id of the synonym that was deleted + SearchSynonymsResponse: + type: object + required: + - synonyms + properties: + synonyms: + type: array + x-go-type: '[]*SearchSynonym' + items: + $ref: '#/components/schemas/SearchSynonym' + HealthStatus: + type: object + required: + - ok + properties: + ok: + type: boolean + SchemaChangeStatus: + type: object + properties: + collection: + type: string + description: Name of the collection being modified + validated_docs: + type: integer + description: Number of documents that have been validated + altered_docs: + type: integer + description: Number of documents that have been altered + SuccessStatus: + type: object + required: + - success + properties: + success: + type: boolean + ApiResponse: + type: object + required: + - message + properties: + message: + type: string + ApiKeySchema: + type: object + required: + - actions + - collections + - description + properties: + value: + type: string + description: + type: string + actions: + type: array + items: + type: string + collections: + type: array + items: + type: string + expires_at: + type: integer + format: int64 + ApiKey: + allOf: + - $ref: '#/components/schemas/ApiKeySchema' + - type: object + properties: + id: + type: integer + format: int64 + readOnly: true + value_prefix: + type: string + readOnly: true + ApiKeyDeleteResponse: + type: object + required: + - id + properties: + id: + type: integer + format: int64 + description: The id of the API key that was deleted + ApiKeysResponse: + type: object + required: + - keys + properties: + keys: + type: array + x-go-type: '[]*ApiKey' + items: + $ref: '#/components/schemas/ApiKey' + ScopedKeyParameters: + type: object + properties: + filter_by: + type: string + expires_at: + type: integer + format: int64 + SnapshotParameters: + type: object + properties: + snapshot_path: + type: string + ErrorResponse: + type: object + properties: + message: + type: string + MultiSearchResult: + type: object + required: + - results + properties: + results: + type: array + items: + $ref: '#/components/schemas/MultiSearchResultItem' + conversation: + $ref: '#/components/schemas/SearchResultConversation' + MultiSearchResultItem: + allOf: + - $ref: '#/components/schemas/SearchResult' + - type: object + properties: + code: + type: integer + description: HTTP error code + format: int64 + error: + type: string + description: Error description + SearchParameters: + type: object + properties: + q: + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + type: string + query_by: + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + type: string + nl_query: + description: Whether to use natural language processing to parse the query. + type: boolean + nl_model_id: + description: The ID of the natural language model to use. + type: string + query_by_weights: + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. 
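The ApiKeySchema above is the request body used when creating keys: `actions`, `collections` and `description` are required, while `value` and `expires_at` are optional. A minimal sketch with placeholder action and collection names:

    use serde_json::json;

    fn main() {
        // Placeholder key-creation body per ApiKeySchema above; the action and
        // collection names are illustrative.
        let key_body = json!({
            "description": "Search-only key for the products collection",
            "actions": ["documents:search"],
            "collections": ["products"],
            "expires_at": 1735689600
        });
        println!("{key_body}");
    }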
+ type: string + text_match_type: + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + type: string + prefix: + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + type: string + infix: + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results + type: string + max_extra_prefix: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + max_extra_suffix: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + filter_by: + description: Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. + type: string + example: 'num_employees:>100 && country: [USA, UK]' + max_filter_by_candidates: + description: Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. + type: integer + sort_by: + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + type: string + example: num_employees:desc + facet_by: + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + type: string + max_facet_values: + description: Maximum number of facet values to be returned. + type: integer + facet_query: + description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". + type: string + num_typos: + description: | + The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + type: string + page: + description: Results from this specific page number would be fetched. + type: integer + per_page: + description: 'Number of results to fetch per page. Default: 10' + type: integer + limit: + description: | + Number of hits to fetch. 
Can be used as an alternative to the per_page parameter. Default: 10. + type: integer + offset: + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + type: integer + group_by: + description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + type: string + group_limit: + description: | + Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + type: integer + group_missing_values: + description: | + Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + type: boolean + include_fields: + description: List of fields from the document to include in the search result + type: string + exclude_fields: + description: List of fields from the document to exclude in the search result + type: string + highlight_full_fields: + description: List of fields which should be highlighted fully without snippeting + type: string + highlight_affix_num_tokens: + description: | + The number of tokens that should surround the highlighted text on each side. Default: 4 + type: integer + highlight_start_tag: + description: | + The start tag used for the highlighted snippets. Default: `` + type: string + highlight_end_tag: + description: | + The end tag used for the highlighted snippets. Default: `` + type: string + enable_highlight_v1: + description: | + Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true + type: boolean + default: true + snippet_threshold: + description: | + Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + type: integer + drop_tokens_threshold: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + type: integer + drop_tokens_mode: + $ref: '#/components/schemas/DropTokensMode' + typo_tokens_threshold: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + type: integer + enable_typos_for_alpha_numerical_tokens: + type: boolean + description: | + Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + filter_curated_hits: + type: boolean + description: | + Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + enable_synonyms: + type: boolean + description: | + If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + synonym_prefix: + type: boolean + description: | + Allow synonym resolution on word prefixes in the query. 
Default: false + synonym_num_typos: + type: integer + description: | + Allow synonym resolution on typo-corrected words in the query. Default: 0 + pinned_hits: + description: | + A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + hidden_hits: + description: | + A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + override_tags: + description: Comma separated list of tags to trigger the curations rules that match the tags. + type: string + highlight_fields: + description: | + A list of custom fields that must be highlighted even if you don't query for them + type: string + split_join_tokens: + description: | + Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. + type: string + pre_segmented_query: + description: | + You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. + Set this parameter to true to do the same + type: boolean + preset: + description: | + Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + type: string + enable_overrides: + description: | + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + type: boolean + default: false + prioritize_exact_match: + description: | + Set this parameter to true to ensure that an exact match is ranked above the others + type: boolean + default: true + max_candidates: + description: | + Control the number of words that Typesense considers for typo and prefix searching. + type: integer + prioritize_token_position: + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. + type: boolean + default: false + prioritize_num_matching_fields: + description: | + Make Typesense prioritize documents where the query words appear in more number of fields. + type: boolean + default: true + enable_typos_for_numerical_tokens: + description: | + Make Typesense disable typos for numerical tokens. + type: boolean + default: true + exhaustive_search: + description: | + Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + type: boolean + search_cutoff_ms: + description: | + Typesense will attempt to return results early if the cutoff time has elapsed. 
This is not a strict guarantee and facet computation is not bound by this parameter. + type: integer + use_cache: + description: | + Enable server side caching of search query results. By default, caching is disabled. + type: boolean + cache_ttl: + description: | + The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + type: integer + min_len_1typo: + description: | + Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + min_len_2typo: + description: | + Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + vector_query: + description: | + Vector query expression for fetching documents "closest" to a given query/document vector. + type: string + remote_embedding_timeout_ms: + description: | + Timeout (in milliseconds) for fetching remote embeddings. + type: integer + remote_embedding_num_tries: + description: | + Number of times to retry fetching remote embeddings. + type: integer + facet_strategy: + description: | + Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + type: string + stopwords: + description: | + Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + type: string + facet_return_parent: + description: | + Comma separated string of nested facet fields whose parent object should be returned in facet response. + type: string + voice_query: + description: | + The base64 encoded audio file in 16 khz 16-bit WAV format. + type: string + conversation: + description: | + Enable conversational search. + type: boolean + conversation_model_id: + description: | + The Id of Conversation Model to be used. + type: string + conversation_id: + description: | + The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + type: string + MultiSearchParameters: + description: | + Parameters for the multi search API. + type: object + properties: + q: + description: The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + type: string + query_by: + description: A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + type: string + query_by_weights: + description: The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + type: string + text_match_type: + description: In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + type: string + prefix: + description: Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + type: string + infix: + description: If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. 
This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results + type: string + max_extra_prefix: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + max_extra_suffix: + description: There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query "K2100" has 2 extra symbols in "6PK2100". By default, any number of prefixes/suffixes can be present for a match. + type: integer + filter_by: + description: Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. + type: string + example: 'num_employees:>100 && country: [USA, UK]' + sort_by: + description: A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + type: string + facet_by: + description: A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + type: string + max_facet_values: + description: Maximum number of facet values to be returned. + type: integer + facet_query: + description: Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix "shoe". + type: string + num_typos: + description: | + The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + type: string + page: + description: Results from this specific page number would be fetched. + type: integer + per_page: + description: 'Number of results to fetch per page. Default: 10' + type: integer + limit: + description: | + Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + type: integer + offset: + description: Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + type: integer + group_by: + description: You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + type: string + group_limit: + description: | + Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + type: integer + group_missing_values: + description: | + Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. 
Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + type: boolean + include_fields: + description: List of fields from the document to include in the search result + type: string + exclude_fields: + description: List of fields from the document to exclude in the search result + type: string + highlight_full_fields: + description: List of fields which should be highlighted fully without snippeting + type: string + highlight_affix_num_tokens: + description: | + The number of tokens that should surround the highlighted text on each side. Default: 4 + type: integer + highlight_start_tag: + description: | + The start tag used for the highlighted snippets. Default: `` + type: string + highlight_end_tag: + description: | + The end tag used for the highlighted snippets. Default: `` + type: string + snippet_threshold: + description: | + Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + type: integer + drop_tokens_threshold: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + type: integer + drop_tokens_mode: + $ref: '#/components/schemas/DropTokensMode' + typo_tokens_threshold: + description: | + If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + type: integer + enable_typos_for_alpha_numerical_tokens: + type: boolean + description: | + Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + filter_curated_hits: + type: boolean + description: | + Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + enable_synonyms: + type: boolean + description: | + If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + synonym_prefix: + type: boolean + description: | + Allow synonym resolution on word prefixes in the query. Default: false + synonym_num_typos: + type: integer + description: | + Allow synonym resolution on typo-corrected words in the query. Default: 0 + pinned_hits: + description: | + A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + type: string + hidden_hits: + description: | + A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. + You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. 
+ type: string + override_tags: + description: Comma separated list of tags to trigger the curations rules that match the tags. + type: string + highlight_fields: + description: | + A list of custom fields that must be highlighted even if you don't query for them + type: string + pre_segmented_query: + description: | + You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. + Set this parameter to true to do the same + type: boolean + default: false + preset: + description: | + Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + type: string + enable_overrides: + description: | + If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + type: boolean + default: false + prioritize_exact_match: + description: | + Set this parameter to true to ensure that an exact match is ranked above the others + type: boolean + default: true + prioritize_token_position: + description: | + Make Typesense prioritize documents where the query words appear earlier in the text. + type: boolean + default: false + prioritize_num_matching_fields: + description: | + Make Typesense prioritize documents where the query words appear in more number of fields. + type: boolean + default: true + enable_typos_for_numerical_tokens: + description: | + Make Typesense disable typos for numerical tokens. + type: boolean + default: true + exhaustive_search: + description: | + Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + type: boolean + search_cutoff_ms: + description: | + Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. + type: integer + use_cache: + description: | + Enable server side caching of search query results. By default, caching is disabled. + type: boolean + cache_ttl: + description: | + The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + type: integer + min_len_1typo: + description: | + Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + min_len_2typo: + description: | + Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + type: integer + vector_query: + description: | + Vector query expression for fetching documents "closest" to a given query/document vector. + type: string + remote_embedding_timeout_ms: + description: | + Timeout (in milliseconds) for fetching remote embeddings. + type: integer + remote_embedding_num_tries: + description: | + Number of times to retry fetching remote embeddings. + type: integer + facet_strategy: + description: | + Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + type: string + stopwords: + description: | + Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. 
+ type: string + facet_return_parent: + description: | + Comma separated string of nested facet fields whose parent object should be returned in facet response. + type: string + voice_query: + description: | + The base64 encoded audio file in 16 khz 16-bit WAV format. + type: string + conversation: + description: | + Enable conversational search. + type: boolean + conversation_model_id: + description: | + The Id of Conversation Model to be used. + type: string + conversation_id: + description: | + The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + type: string + MultiSearchSearchesParameter: + type: object + required: + - searches + properties: + union: + type: boolean + description: When true, merges the search results from each search query into a single ordered set of hits. + searches: + type: array + items: + $ref: '#/components/schemas/MultiSearchCollectionParameters' + MultiSearchCollectionParameters: + allOf: + - $ref: '#/components/schemas/MultiSearchParameters' + - type: object + properties: + collection: + type: string + description: | + The collection to search in. + x-typesense-api-key: + type: string + description: A separate search API key for each search within a multi_search request + rerank_hybrid_matches: + type: boolean + description: | + When true, computes both text match and vector distance scores for all matches in hybrid search. Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. + default: false + FacetCounts: + type: object + properties: + counts: + type: array + items: + type: object + properties: + count: + type: integer + highlighted: + type: string + value: + type: string + parent: + type: object + field_name: + type: string + stats: + type: object + properties: + max: + type: number + format: double + min: + type: number + format: double + sum: + type: number + format: double + total_values: + type: integer + avg: + type: number + format: double + AnalyticsEventCreateResponse: + type: object + required: + - ok + properties: + ok: + type: boolean + AnalyticsEventCreateSchema: + type: object + required: + - type + - name + - data + properties: + type: + type: string + name: + type: string + data: + type: object + AnalyticsRuleUpsertSchema: + type: object + required: + - type + - params + properties: + type: + type: string + enum: + - popular_queries + - nohits_queries + - counter + params: + $ref: '#/components/schemas/AnalyticsRuleParameters' + AnalyticsRuleParameters: + type: object + required: + - source + - destination + properties: + source: + $ref: '#/components/schemas/AnalyticsRuleParametersSource' + destination: + $ref: '#/components/schemas/AnalyticsRuleParametersDestination' + limit: + type: integer + expand_query: + type: boolean + AnalyticsRuleParametersSource: + type: object + required: + - collections + properties: + collections: + type: array + items: + type: string + events: + type: array + items: + type: object + required: + - type + - weight + - name + properties: + type: + type: string + weight: + type: number + format: float + name: + type: string + AnalyticsRuleParametersDestination: + type: object + required: + - collection + properties: + collection: + type: string + counter_field: + type: string + AnalyticsRuleDeleteResponse: + type: object + required: + - name + properties: + name: + type: string + AnalyticsRuleSchema: + allOf: + - $ref: 
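The long parameter blocks above (SearchParameters and its MultiSearchParameters counterpart) come together in the MultiSearchSearchesParameter body, where each entry is a MultiSearchCollectionParameters object. A sketch of a two-collection multi_search request, with placeholder collection and field names:

    use serde_json::json;

    fn main() {
        // Illustrative multi_search body: each `searches` entry carries a
        // `collection` plus any of the per-search parameters described above.
        let body = json!({
            "union": false,
            "searches": [
                { "collection": "products", "q": "shoe", "query_by": "name", "filter_by": "price:<100" },
                { "collection": "brands",   "q": "shoe", "query_by": "name" }
            ]
        });
        println!("{body}");
    }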
'#/components/schemas/AnalyticsRuleUpsertSchema' + - type: object + required: + - name + properties: + name: + type: string + AnalyticsRulesRetrieveSchema: + type: object + properties: + rules: + type: array + items: + $ref: '#/components/schemas/AnalyticsRuleSchema' + x-go-type: '[]*AnalyticsRuleSchema' + APIStatsResponse: + type: object + properties: + delete_latency_ms: + type: number + format: double + delete_requests_per_second: + type: number + format: double + import_latency_ms: + type: number + format: double + import_requests_per_second: + type: number + format: double + latency_ms: + type: object + x-go-type: map[string]float64 + overloaded_requests_per_second: + type: number + format: double + pending_write_batches: + type: number + format: double + requests_per_second: + type: object + x-go-type: map[string]float64 + search_latency_ms: + type: number + format: double + search_requests_per_second: + type: number + format: double + total_requests_per_second: + type: number + format: double + write_latency_ms: + type: number + format: double + write_requests_per_second: + type: number + format: double + StopwordsSetUpsertSchema: + type: object + properties: + stopwords: + type: array + items: + type: string + locale: + type: string + required: + - stopwords + example: | + {"stopwords": ["Germany", "France", "Italy"], "locale": "en"} + StopwordsSetSchema: + type: object + properties: + id: + type: string + stopwords: + type: array + items: + type: string + locale: + type: string + required: + - id + - stopwords + example: | + {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"} + StopwordsSetRetrieveSchema: + type: object + properties: + stopwords: + $ref: '#/components/schemas/StopwordsSetSchema' + required: + - stopwords + example: | + {"stopwords": {"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}} + StopwordsSetsRetrieveAllSchema: + type: object + properties: + stopwords: + type: array + items: + $ref: '#/components/schemas/StopwordsSetSchema' + required: + - stopwords + example: | + {"stopwords": [{"id": "countries", "stopwords": ["Germany", "France", "Italy"], "locale": "en"}]} + PresetUpsertSchema: + properties: + value: + oneOf: + - $ref: '#/components/schemas/SearchParameters' + - $ref: '#/components/schemas/MultiSearchSearchesParameter' + required: + - value + PresetSchema: + allOf: + - $ref: '#/components/schemas/PresetUpsertSchema' + - type: object + required: + - name + properties: + name: + type: string + PresetsRetrieveSchema: + type: object + required: + - presets + properties: + presets: + type: array + items: + $ref: '#/components/schemas/PresetSchema' + x-go-type: '[]*PresetSchema' + PresetDeleteSchema: + type: object + required: + - name + properties: + name: + type: string + DocumentIndexParameters: + type: object + properties: + dirty_values: + $ref: '#/components/schemas/DirtyValues' + DirtyValues: + type: string + enum: + - coerce_or_reject + - coerce_or_drop + - drop + - reject + IndexAction: + type: string + enum: + - create + - update + - upsert + - emplace + DropTokensMode: + type: string + enum: + - right_to_left + - left_to_right + - both_sides:3 + description: | + Dictates the direction in which the words in the query must be dropped when the original words in the query do not appear in any document. 
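Unlike the stopwords and preset schemas, the analytics rule schemas carry no inline example, so here is a hedged sketch of an AnalyticsRuleUpsertSchema body for a popular_queries rule; the collection names are placeholders:

    use serde_json::json;

    fn main() {
        // Placeholder rule body: `type` must be one of popular_queries,
        // nohits_queries or counter; `params` follows AnalyticsRuleParameters.
        let rule_body = json!({
            "type": "popular_queries",
            "params": {
                "source": { "collections": ["products"] },
                "destination": { "collection": "product_queries" },
                "limit": 1000
            }
        });
        println!("{rule_body}");
    }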
Values: right_to_left (default), left_to_right, both_sides:3 A note on both_sides:3 - for queries upto 3 tokens (words) in length, this mode will drop tokens from both sides and exhaustively rank all matching results. If query length is greater than 3 words, Typesense will just fallback to default behavior of right_to_left + ConversationModelCreateSchema: + required: + - model_name + - max_bytes + allOf: + - $ref: '#/components/schemas/ConversationModelUpdateSchema' + - type: object + required: + - model_name + - max_bytes + - history_collection + properties: + model_name: + description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM + type: string + max_bytes: + description: | + The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. + type: integer + history_collection: + type: string + description: Typesense collection that stores the historical conversations + ConversationModelUpdateSchema: + type: object + properties: + id: + type: string + description: An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. + model_name: + description: Name of the LLM model offered by OpenAI, Cloudflare or vLLM + type: string + api_key: + description: The LLM service's API Key + type: string + history_collection: + type: string + description: Typesense collection that stores the historical conversations + account_id: + description: LLM service's account ID (only applicable for Cloudflare) + type: string + system_prompt: + description: The system prompt that contains special instructions to the LLM + type: string + ttl: + type: integer + description: | + Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) + max_bytes: + description: | + The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. + type: integer + vllm_url: + description: URL of vLLM service + type: string + ConversationModelSchema: + allOf: + - $ref: '#/components/schemas/ConversationModelCreateSchema' + - type: object + required: + - id + properties: + id: + type: string + description: An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. 
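For the conversation model schemas above, a create request must carry `model_name`, `max_bytes` and `history_collection`; the remaining fields are optional. A sketch with placeholder values (the model name format and key are assumptions, not recommendations):

    use serde_json::json;

    fn main() {
        // Placeholder ConversationModelCreateSchema body; model name and key
        // are illustrative only.
        let model_body = json!({
            "model_name": "openai/gpt-4o-mini",
            "api_key": "sk-...",
            "history_collection": "conversation_store",
            "system_prompt": "You are a helpful product-search assistant.",
            "max_bytes": 16384,
            "ttl": 86400
        });
        println!("{model_body}");
    }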
+ StemmingDictionary: + type: object + required: + - id + - words + properties: + id: + type: string + description: Unique identifier for the dictionary + example: irregular-plurals + words: + type: array + description: List of word mappings in the dictionary + items: + type: object + required: + - word + - root + properties: + word: + type: string + description: The word form to be stemmed + example: people + root: + type: string + description: The root form of the word + example: person + NLSearchModelBase: + type: object + properties: + model_name: + type: string + description: Name of the NL model to use + api_key: + type: string + description: API key for the NL model service + api_url: + type: string + description: Custom API URL for the NL model service + max_bytes: + type: integer + description: Maximum number of bytes to process + temperature: + type: number + description: Temperature parameter for the NL model + system_prompt: + type: string + description: System prompt for the NL model + top_p: + type: number + description: Top-p parameter for the NL model (Google-specific) + top_k: + type: integer + description: Top-k parameter for the NL model (Google-specific) + stop_sequences: + type: array + items: + type: string + description: Stop sequences for the NL model (Google-specific) + api_version: + type: string + description: API version for the NL model service + project_id: + type: string + description: Project ID for GCP Vertex AI + access_token: + type: string + description: Access token for GCP Vertex AI + refresh_token: + type: string + description: Refresh token for GCP Vertex AI + client_id: + type: string + description: Client ID for GCP Vertex AI + client_secret: + type: string + description: Client secret for GCP Vertex AI + region: + type: string + description: Region for GCP Vertex AI + max_output_tokens: + type: integer + description: Maximum output tokens for GCP Vertex AI + account_id: + type: string + description: Account ID for Cloudflare-specific models + NLSearchModelCreateSchema: + allOf: + - $ref: '#/components/schemas/NLSearchModelBase' + - type: object + properties: + id: + type: string + description: Optional ID for the NL search model + NLSearchModelSchema: + allOf: + - $ref: '#/components/schemas/NLSearchModelCreateSchema' + - type: object + required: + - id + properties: + id: + type: string + description: ID of the NL search model + NLSearchModelUpdateSchema: + $ref: '#/components/schemas/NLSearchModelCreateSchema' + NLSearchModelDeleteSchema: + type: object + required: + - id + properties: + id: + type: string + description: ID of the deleted NL search model + ImportDocumentsParameters: + type: object + properties: + batch_size: + type: integer + return_id: + type: boolean + description: Returning the id of the imported documents. If you want the import response to return the ingested document's id in the response, you can use the return_id parameter. + remote_embedding_batch_size: + type: integer + return_doc: + type: boolean + action: + $ref: '#/components/schemas/IndexAction' + dirty_values: + $ref: '#/components/schemas/DirtyValues' + ExportDocumentsParameters: + type: object + properties: + filter_by: + description: Filter conditions for refining your search results. Separate multiple conditions with &&. 
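The ImportDocumentsParameters above accompany a newline-delimited JSON body. As a reminder of the wire format, the payload is simply one JSON document per line, with options such as action, batch_size and dirty_values supplied separately per the schema; a small sketch with made-up documents:

    fn main() {
        // Illustrative JSONL import payload: one JSON document per line,
        // no enclosing array and no trailing commas.
        let documents_jsonl = [
            r#"{"id": "1", "company_name": "Stark Industries", "num_employees": 5215}"#,
            r#"{"id": "2", "company_name": "Wayne Enterprises", "num_employees": 1031}"#,
        ]
        .join("\n");
        println!("{documents_jsonl}");
    }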
+ type: string + include_fields: + description: List of fields from the document to include in the search result + type: string + exclude_fields: + description: List of fields from the document to exclude in the search result + type: string + UpdateDocumentsParameters: + type: object + properties: + filter_by: + type: string + example: 'num_employees:>100 && country: [USA, UK]' + DeleteDocumentsParameters: + type: object + required: + - filter_by + properties: + filter_by: + type: string + example: 'num_employees:>100 && country: [USA, UK]' + batch_size: + description: Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. + type: integer + ignore_not_found: + type: boolean + truncate: + description: When true, removes all documents from the collection while preserving the collection and its schema. + type: boolean + securitySchemes: + api_key_header: + type: apiKey + name: X-TYPESENSE-API-KEY + in: header diff --git a/typesense/Cargo.toml b/typesense/Cargo.toml index 65cecf0..d703ba9 100644 --- a/typesense/Cargo.toml +++ b/typesense/Cargo.toml @@ -26,12 +26,15 @@ sha2 = "0.10" typesense_derive = { version = "0.1.0", path = "../typesense_derive", optional = true } typesense_codegen = { version = "0.25.0", path = "../typesense_codegen" } reqwest-retry = "0.7.0" -reqwest = { version = "0.11", features = ["json"] } -reqwest-middleware = { version = "0.3", features = ["json"] } +reqwest = { version = "0.12", features = ["json"] } +reqwest-middleware = { version = "0.4.2", features = ["json"] } +thiserror = "1.0" [dev-dependencies] dotenvy = "0.15" trybuild = "1.0.42" +tokio = { version = "1", features = ["macros", "rt-multi-thread"] } +wiremock = "0.5" [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] tokio = { version = "1.5", features = ["macros", "rt", "rt-multi-thread"] } @@ -50,3 +53,6 @@ required-features = ["derive"] name = "api_tests" path = "tests/api/lib.rs" +[[test]] +name = "client" +path = "tests/client/mod.rs" \ No newline at end of file diff --git a/typesense/src/client/analytics/events.rs b/typesense/src/client/analytics/events.rs new file mode 100644 index 0000000..55bafac --- /dev/null +++ b/typesense/src/client/analytics/events.rs @@ -0,0 +1,46 @@ +//! Provides access to the API endpoint for posting analytics events. +//! +//! An `Events` instance is created via the `Client::analytics().events()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{analytics_api, configuration}, + models, +}; + +/// Provides methods for interacting with analytics events. +/// +/// This struct is created by calling `client.analytics().events()`. +pub struct Events<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Events<'a> { + /// Creates a new `Events` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Posts an analytics event for tracking user behavior. + /// + /// This is useful for features like "search result ranking based on popularity." + /// + /// # Arguments + /// * `schema` - An `AnalyticsEventCreateSchema` object representing the event. 
+ pub async fn create( + &self, + schema: models::AnalyticsEventCreateSchema, + ) -> Result> + { + let params = analytics_api::CreateAnalyticsEventParams { + analytics_event_create_schema: schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { analytics_api::create_analytics_event(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/analytics/mod.rs b/typesense/src/client/analytics/mod.rs new file mode 100644 index 0000000..cb49810 --- /dev/null +++ b/typesense/src/client/analytics/mod.rs @@ -0,0 +1,44 @@ +//! Provides access to the analytics API endpoints for managing rules and posting events. +//! +//! An `Analytics` instance is created via the main `Client::analytics()` method. +pub mod events; +pub mod rule; +pub mod rules; +use super::{Client, Error}; +pub use events::Events; +pub use rule::Rule; +pub use rules::Rules; + +/// Provides methods for interacting with Typesense analytics rules and events. +/// +/// This struct is created by calling `client.analytics()`. +pub struct Analytics<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Analytics<'a> { + /// Creates a new `Analytics` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Provides access to endpoints for managing a collection of analytics rules. + pub fn rules(&self) -> Rules<'a> { + Rules::new(self.client) + } + + /// Provides access to endpoints for managing a single analytics rule. + /// + /// # Arguments + /// * `rule_name` - The name of the analytics rule to manage. + pub fn rule(&self, rule_name: &'a str) -> Rule<'a> { + Rule::new(self.client, rule_name) + } + + /// Provides access to the endpoint for creating analytics events. + /// + /// Example: `client.analytics().events().create(...).await` + pub fn events(&self) -> Events<'a> { + Events::new(self.client) + } +} diff --git a/typesense/src/client/analytics/rule.rs b/typesense/src/client/analytics/rule.rs new file mode 100644 index 0000000..7fc8222 --- /dev/null +++ b/typesense/src/client/analytics/rule.rs @@ -0,0 +1,56 @@ +//! Provides access to the API endpoints for managing a single analytics rule. +//! +//! An `Rule` instance is created via the `Client::analytics().rule("rule_name")` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{analytics_api, configuration}, + models, +}; + +/// Provides methods for interacting with a specific analytics rule. +/// +/// This struct is created by calling `analytics.rule("rule_name")`. +pub struct Rule<'a> { + pub(super) client: &'a Client, + pub(super) rule_name: &'a str, +} + +impl<'a> Rule<'a> { + /// Creates a new `Rule` instance for a specific rule name. + pub(super) fn new(client: &'a Client, rule_name: &'a str) -> Self { + Self { client, rule_name } + } + + /// Retrieves the details of this specific analytics rule. + pub async fn retrieve( + &self, + ) -> Result> { + let params = analytics_api::RetrieveAnalyticsRuleParams { + rule_name: self.rule_name.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { analytics_api::retrieve_analytics_rule(&config, params_for_move).await } + }) + .await + } + + /// Permanently deletes this specific analytics rule. 
+ pub async fn delete( + &self, + ) -> Result> + { + let params = analytics_api::DeleteAnalyticsRuleParams { + rule_name: self.rule_name.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { analytics_api::delete_analytics_rule(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/analytics/rules.rs b/typesense/src/client/analytics/rules.rs new file mode 100644 index 0000000..8425048 --- /dev/null +++ b/typesense/src/client/analytics/rules.rs @@ -0,0 +1,79 @@ +//! Provides access to the API endpoints for managing analytics rules. +//! +//! An `Rules` instance is created via the `Client::analytics().rules()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{analytics_api, configuration}, + models, +}; + +/// Provides methods for interacting with a collection of analytics rules. +/// +/// This struct is created by calling `client.analytics().rules()`. +pub struct Rules<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Rules<'a> { + /// Creates a new `Rules` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Creates a new analytics rule. + /// + /// # Arguments + /// * `schema` - An `AnalyticsRuleSchema` object describing the rule to be created. + pub async fn create( + &self, + schema: models::AnalyticsRuleSchema, + ) -> Result> { + let params = analytics_api::CreateAnalyticsRuleParams { + analytics_rule_schema: schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { analytics_api::create_analytics_rule(&config, params_for_move).await } + }) + .await + } + + /// Creates or updates an analytics rule with the given name. + /// + /// # Arguments + /// * `rule_name` - The name of the analytics rule to create or update. + /// * `schema` - An `AnalyticsRuleUpsertSchema` object with the rule's parameters. + pub async fn upsert( + &self, + rule_name: &str, + schema: models::AnalyticsRuleUpsertSchema, + ) -> Result> { + let params = analytics_api::UpsertAnalyticsRuleParams { + rule_name: rule_name.to_string(), + analytics_rule_upsert_schema: schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { analytics_api::upsert_analytics_rule(&config, params_for_move).await } + }) + .await + } + + /// Retrieves the details of all analytics rules. + pub async fn retrieve( + &self, + ) -> Result< + models::AnalyticsRulesRetrieveSchema, + Error, + > { + self.client + .execute(|config: Arc| async move { + analytics_api::retrieve_analytics_rules(&config).await + }) + .await + } +} diff --git a/typesense/src/client/collection/document.rs b/typesense/src/client/collection/document.rs new file mode 100644 index 0000000..10c3005 --- /dev/null +++ b/typesense/src/client/collection/document.rs @@ -0,0 +1,83 @@ +//! Provides access to API endpoints for a single document within a Typesense collection. +//! +//! An instance of `Document` is scoped to a specific document and is created +//! via a parent `Collection` struct, for example: +//! `client.collection("collection_name").document("document_id")` + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::apis::{configuration, documents_api}; + +/// Provides methods for interacting with a single document within a specific Typesense collection. +/// +/// This struct is created by calling a method like `collection.document("document_id")`. 
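Putting the analytics modules above together, typical call sites look like the sketch below. The `typesense::client::Client` import path is assumed from this patch's module layout, and the schema values are taken as arguments rather than constructed, since their generated constructors are not shown here.

    use typesense::client::Client; // module path assumed from this patch
    use typesense_codegen::models;

    // Sketch only: how the fluent analytics API defined above is expected to be
    // called; `client` and the schema values are assumed to be built elsewhere.
    async fn analytics_examples(
        client: &Client,
        rule: models::AnalyticsRuleUpsertSchema,
        event: models::AnalyticsEventCreateSchema,
    ) {
        // Create or update a rule by name, list all rules, read one back, delete it.
        let _ = client.analytics().rules().upsert("products_top_queries", rule).await;
        let _all_rules = client.analytics().rules().retrieve().await;
        let _one_rule = client.analytics().rule("products_top_queries").retrieve().await;
        let _ = client.analytics().rule("products_top_queries").delete().await;

        // Record a user interaction event.
        let _ = client.analytics().events().create(event).await;
    }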
+pub struct Document<'a> { + pub(super) client: &'a Client, + pub(super) collection_name: &'a str, + pub(super) document_id: &'a str, +} + +impl<'a> Document<'a> { + /// Creates a new `Document` instance for a specific document ID. + /// This is intended for internal use by the parent `Documents` struct. + pub(super) fn new(client: &'a Client, collection_name: &'a str, document_id: &'a str) -> Self { + Self { + client, + collection_name, + document_id, + } + } + + /// Fetches this individual document from the collection. + pub async fn get(&self) -> Result> { + let params = documents_api::GetDocumentParams { + collection_name: self.collection_name.to_string(), + document_id: self.document_id.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::get_document(&config, params_for_move).await } + }) + .await + } + + /// Updates this individual document. The update can be partial. + /// + /// # Arguments + /// * `document` - A `serde_json::Value` containing the fields to update. + pub async fn update( + &self, + document: serde_json::Value, + ) -> Result> { + let params = documents_api::UpdateDocumentParams { + collection_name: self.collection_name.to_string(), + document_id: self.document_id.to_string(), + body: document, + dirty_values: None, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::update_document(&config, params_for_move).await } + }) + .await + } + + /// Deletes this individual document from the collection. + pub async fn delete( + &self, + ) -> Result> { + let params = documents_api::DeleteDocumentParams { + collection_name: self.collection_name.to_string(), + document_id: self.document_id.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::delete_document(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/collection/documents.rs b/typesense/src/client/collection/documents.rs new file mode 100644 index 0000000..6226368 --- /dev/null +++ b/typesense/src/client/collection/documents.rs @@ -0,0 +1,311 @@ +//! Provides access to the document, search, and override-related API endpoints. +//! +//! An instance of `Documents` is scoped to a specific collection and is created +//! via the main `client.collection("collection_name").documents()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, documents_api}, + models, +}; + +/// Provides methods for interacting with documents within a specific Typesense collection. +/// +/// This struct is created by calling `client.collection("collection_name").documents("collection_name")`. +pub struct Documents<'a> { + pub(super) client: &'a Client, + pub(super) collection_name: &'a str, +} + +impl<'a> Documents<'a> { + /// Creates a new `Documents` instance. + /// + /// This is typically called by `Client::documents()`. + pub(super) fn new(client: &'a Client, collection_name: &'a str) -> Self { + Self { + client, + collection_name, + } + } + + /// Indexes a document in the collection. + /// + /// If the document has an 'id' field, it will be used as the document's ID. + /// Otherwise, Typesense will auto-generate an ID. + /// + /// # Arguments + /// * `document` - A `serde_json::Value` representing the document. + /// * `action` - The indexing action to perform (e.g., "create", "upsert", "update"). 
+ pub async fn index( + &self, + document: serde_json::Value, + action: &str, + ) -> Result> { + let params = documents_api::IndexDocumentParams { + collection_name: self.collection_name.to_string(), + body: document, + action: Some(action.to_string()), + dirty_values: None, // Or expose this as an argument if needed + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::index_document(&config, params_for_move).await } + }) + .await + } + + /// Fetches an individual document from the collection by its ID. + /// + /// # Arguments + /// * `document_id` - The ID of the document to retrieve. + pub async fn retrieve( + &self, + document_id: &str, + ) -> Result> { + let params = documents_api::GetDocumentParams { + collection_name: self.collection_name.to_string(), + document_id: document_id.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::get_document(&config, params_for_move).await } + }) + .await + } + + /// Updates an individual document from the collection by its ID. The update can be partial. + /// + /// # Arguments + /// * `document_id` - The ID of the document to update. + /// * `document` - A `serde_json::Value` containing the fields to update. + pub async fn update( + &self, + document_id: &str, + document: serde_json::Value, + ) -> Result> { + let params = documents_api::UpdateDocumentParams { + collection_name: self.collection_name.to_string(), + document_id: document_id.to_string(), + body: document, + dirty_values: None, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::update_document(&config, params_for_move).await } + }) + .await + } + + /// Deletes an individual document from the collection by its ID. + /// + /// # Arguments + /// * `document_id` - The ID of the document to delete. + pub async fn delete( + &self, + document_id: &str, + ) -> Result> { + let params = documents_api::DeleteDocumentParams { + collection_name: self.collection_name.to_string(), + document_id: document_id.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::delete_document(&config, params_for_move).await } + }) + .await + } + + // --- Bulk Operation Methods --- + + /// Imports a batch of documents in JSONL format. + /// + /// The documents to be imported must be formatted as a newline-delimited JSON string. + /// + /// # Arguments + /// * `documents_jsonl` - A string containing the documents in JSONL format. + /// * `params` - An `ImportDocumentsParams` struct containing options like `action` and `batch_size`. + /// The `collection_name` field will be overwritten. + pub async fn import( + &self, + documents_jsonl: String, + mut params: documents_api::ImportDocumentsParams, + ) -> Result> { + params.collection_name = self.collection_name.to_string(); + params.body = documents_jsonl; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::import_documents(&config, params_for_move).await } + }) + .await + } + + /// Exports all documents in a collection in JSONL format. + /// + /// # Arguments + /// * `params` - An `ExportDocumentsParams` struct containing options like `filter_by` and `include_fields`. + /// The `collection_name` field will be overwritten. 
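The methods above map onto the per-collection document endpoints. A usage sketch follows; it assumes the `client.collection(...)` accessor referenced in the module docs, a placeholder "books" collection, and the `typesense::client::Client` import path, with error handling omitted.

    use serde_json::json;
    use typesense::client::Client; // module path assumed from this patch

    // Sketch of the document CRUD calls defined above; names are placeholders.
    async fn document_examples(client: &Client) {
        // Create (or upsert) a document in the "books" collection.
        let _created = client
            .collection("books")
            .documents()
            .index(json!({ "id": "1", "title": "Dune", "pages": 412 }), "upsert")
            .await;

        // Read, partially update, then delete it by id.
        let _fetched = client.collection("books").documents().retrieve("1").await;
        let _updated = client
            .collection("books")
            .documents()
            .update("1", json!({ "pages": 896 }))
            .await;
        let _deleted = client.collection("books").documents().delete("1").await;
    }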
+ pub async fn export( + &self, + mut params: documents_api::ExportDocumentsParams, + ) -> Result> { + params.collection_name = self.collection_name.to_string(); + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::export_documents(&config, params_for_move).await } + }) + .await + } + + /// Deletes a batch of documents matching a specific filter condition. + /// + /// # Arguments + /// * `filter_by` - The filter condition for deleting documents. + /// * `batch_size` - The number of documents to delete at a time. + pub async fn delete_by_filter( + &self, + filter_by: &str, + batch_size: Option, + ) -> Result> + { + let params = documents_api::DeleteDocumentsParams { + collection_name: self.collection_name.to_string(), + filter_by: Some(filter_by.to_string()), + batch_size, + ignore_not_found: None, + truncate: None, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::delete_documents(&config, params_for_move).await } + }) + .await + } + + /// Updates a batch of documents matching a specific filter condition. + /// + /// # Arguments + /// * `filter_by` - The filter condition for updating documents. + /// * `document` - A `serde_json::Value` containing the fields to update. + pub async fn update_by_filter( + &self, + filter_by: &str, + document: serde_json::Value, + ) -> Result> + { + let params = documents_api::UpdateDocumentsParams { + collection_name: self.collection_name.to_string(), + filter_by: Some(filter_by.to_string()), + body: document, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::update_documents(&config, params_for_move).await } + }) + .await + } + + /// Searches for documents in the collection that match the given criteria. + /// + /// # Arguments + /// * `params` - A `SearchParameters` struct containing all search parameters. + /// you can construct it like this: + /// `SearchParameters { q: Some("...".into()), query_by: Some("...".into()), ..Default::default() }` + pub async fn search( + &self, + params: models::SearchParameters, + ) -> Result> { + let search_params = documents_api::SearchCollectionParams { + collection_name: self.collection_name.to_string(), + + // Map all corresponding fields directly. 
+ cache_ttl: params.cache_ttl, + conversation: params.conversation, + conversation_id: params.conversation_id, + conversation_model_id: params.conversation_model_id, + drop_tokens_mode: params.drop_tokens_mode, + drop_tokens_threshold: params.drop_tokens_threshold, + enable_highlight_v1: params.enable_highlight_v1, + enable_overrides: params.enable_overrides, + enable_synonyms: params.enable_synonyms, + enable_typos_for_alpha_numerical_tokens: params.enable_typos_for_alpha_numerical_tokens, + enable_typos_for_numerical_tokens: params.enable_typos_for_numerical_tokens, + exclude_fields: params.exclude_fields, + exhaustive_search: params.exhaustive_search, + facet_by: params.facet_by, + facet_query: params.facet_query, + facet_return_parent: params.facet_return_parent, + facet_strategy: params.facet_strategy, + filter_by: params.filter_by, + filter_curated_hits: params.filter_curated_hits, + group_by: params.group_by, + group_limit: params.group_limit, + group_missing_values: params.group_missing_values, + hidden_hits: params.hidden_hits, + highlight_affix_num_tokens: params.highlight_affix_num_tokens, + highlight_end_tag: params.highlight_end_tag, + highlight_fields: params.highlight_fields, + highlight_full_fields: params.highlight_full_fields, + highlight_start_tag: params.highlight_start_tag, + include_fields: params.include_fields, + infix: params.infix, + limit: params.limit, + max_candidates: params.max_candidates, + max_extra_prefix: params.max_extra_prefix, + max_extra_suffix: params.max_extra_suffix, + max_facet_values: params.max_facet_values, + max_filter_by_candidates: params.max_filter_by_candidates, + min_len_1typo: params.min_len_1typo, + min_len_2typo: params.min_len_2typo, + num_typos: params.num_typos, + offset: params.offset, + override_tags: params.override_tags, + page: params.page, + per_page: params.per_page, + pinned_hits: params.pinned_hits, + pre_segmented_query: params.pre_segmented_query, + prefix: params.prefix, + preset: params.preset, + prioritize_exact_match: params.prioritize_exact_match, + prioritize_num_matching_fields: params.prioritize_num_matching_fields, + prioritize_token_position: params.prioritize_token_position, + q: params.q, + query_by: params.query_by, + query_by_weights: params.query_by_weights, + remote_embedding_num_tries: params.remote_embedding_num_tries, + remote_embedding_timeout_ms: params.remote_embedding_timeout_ms, + search_cutoff_ms: params.search_cutoff_ms, + snippet_threshold: params.snippet_threshold, + sort_by: params.sort_by, + split_join_tokens: params.split_join_tokens, + stopwords: params.stopwords, + synonym_num_typos: params.synonym_num_typos, + synonym_prefix: params.synonym_prefix, + text_match_type: params.text_match_type, + typo_tokens_threshold: params.typo_tokens_threshold, + use_cache: params.use_cache, + vector_query: params.vector_query, + voice_query: params.voice_query, + nl_model_id: params.nl_model_id, + nl_query: params.nl_query, + }; + + self.client + .execute(|config: Arc| { + let params_for_move = search_params.clone(); + async move { documents_api::search_collection(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/collection/mod.rs b/typesense/src/client/collection/mod.rs new file mode 100644 index 0000000..3acad6f --- /dev/null +++ b/typesense/src/client/collection/mod.rs @@ -0,0 +1,145 @@ +//! Provides access to the collection and alias-related API endpoints. +//! +//! A `Collections` instance is created via the main `Client::collections()` method. 
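+//!
+//! # Example
+//!
+//! An illustrative sketch (not compiled as a doc-test); it assumes a configured
+//! `client` and an existing collection named "books":
+//!
+//! ```ignore
+//! // Fetch the collection's schema and metadata.
+//! let collection = client.collection("books").retrieve().await?;
+//!
+//! // Drill down to a single document within the same collection.
+//! let doc = client.collection("books").document("1").get().await?;
+//! ```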
+ +pub mod document; +pub mod documents; +pub mod search_override; +pub mod search_overrides; +pub mod synonym; +pub mod synonyms; +use super::{Client, Error}; +pub use document::Document; +pub use documents::Documents; +pub use search_override::SearchOverride; +pub use search_overrides::SearchOverrides; +use std::sync::Arc; +pub use synonym::Synonym; +pub use synonyms::Synonyms; +use typesense_codegen::{ + apis::{collections_api, configuration}, + models, +}; + +/// Provides methods for interacting with a Typesense collection. +/// +/// This struct is created by calling `client.collection("collection_name")`. +pub struct Collection<'a> { + pub(super) client: &'a Client, + pub(super) collection_name: &'a str, +} + +impl<'a> Collection<'a> { + /// Creates a new `Collection` instance. + pub(super) fn new(client: &'a Client, collection_name: &'a str) -> Self { + Self { + client, + collection_name, + } + } + // --- Documents Accessors --- + + /// Provides access to the document-related API endpoints for a specific collection. + pub fn documents(&'a self) -> documents::Documents<'a> { + documents::Documents::new(self.client, self.collection_name) + } + + /// Provides access to the API endpoints for a single document within a Typesense collection. + pub fn document(&'a self, document_id: &'a str) -> document::Document<'a> { + document::Document::new(self.client, self.collection_name, document_id) + } + + // --- Overrides Accessors --- + + /// Provides access to endpoints for managing the collection of search overrides. + /// + /// Example: `client.collection("collection_name").search_overrides().retrieve().await` + pub fn search_overrides(&self) -> SearchOverrides<'a> { + SearchOverrides::new(self.client, self.collection_name) + } + + /// Provides access to endpoints for managing a single search override. + /// + /// # Arguments + /// * `override_id` - The ID of the search override to manage. + /// + /// Example: `client.collection("collection_name").search_override("...").retrieve().await` + pub fn search_override(&self, override_id: &'a str) -> SearchOverride<'a> { + SearchOverride::new(self.client, self.collection_name, override_id) + } + + // --- Synonym Accessors --- + + /// Provides access to endpoints for managing the collection of search synonyms. + /// + /// Example: `client.collection("collection_name").synonyms().retrieve().await` + pub fn synonyms(&self) -> Synonyms<'a> { + Synonyms::new(self.client, self.collection_name) + } + + /// Provides access to endpoints for managing a single search synonym. + /// + /// # Arguments + /// * `synonym_id` - The ID of the search synonym to manage. + /// + /// Example: `client.collection("collection_name").synonym("synonym_id").delete().await` + pub fn synonym(&self, synonym_id: &'a str) -> Synonym<'a> { + Synonym::new(self.client, self.collection_name, synonym_id) + } + + // --- Methods for this collection --- + + /// Retrieves the details of a collection, given its name. + pub async fn retrieve( + &self, + ) -> Result> { + let params = collections_api::GetCollectionParams { + collection_name: self.collection_name.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::get_collection(&config, params_for_move).await } + }) + .await + } + + /// Permanently drops a collection. + /// + /// This action cannot be undone. For large collections, this might have an + /// impact on read latencies during the delete operation. 
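+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch (not compiled as a doc-test), assuming a configured `client`:
+    ///
+    /// ```ignore
+    /// let dropped = client.collection("outdated_books").delete().await?;
+    /// println!("Dropped collection: {}", dropped.name);
+    /// ```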
+ pub async fn delete( + &self, + ) -> Result> { + let params = collections_api::DeleteCollectionParams { + collection_name: self.collection_name.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::delete_collection(&config, params_for_move).await } + }) + .await + } + + /// Updates a collection's schema to modify the fields and their types. + /// + /// # Arguments + /// * `update_schema` - A `CollectionUpdateSchema` object describing the fields to update. + pub async fn update( + &self, + update_schema: models::CollectionUpdateSchema, + ) -> Result> { + let params = collections_api::UpdateCollectionParams { + collection_name: self.collection_name.to_string(), + collection_update_schema: update_schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::update_collection(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/collection/search_override.rs b/typesense/src/client/collection/search_override.rs new file mode 100644 index 0000000..d8ae007 --- /dev/null +++ b/typesense/src/client/collection/search_override.rs @@ -0,0 +1,65 @@ +//! Provides access to the API endpoints for managing a single search override. +//! +//! An instance of `SearchOverride` is created via the `Client::collection("collection_name").search_override("search_override_id")` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, documents_api}, + models, +}; + +/// Provides methods for interacting with a specific search override. +/// +/// This struct is created by calling `documents.search_override("override_id")`. +pub struct SearchOverride<'a> { + pub(super) client: &'a Client, + pub(super) collection_name: &'a str, + pub(super) override_id: &'a str, +} + +impl<'a> SearchOverride<'a> { + /// Creates a new `Override` instance for a specific override ID. + pub(super) fn new(client: &'a Client, collection_name: &'a str, override_id: &'a str) -> Self { + Self { + client, + collection_name, + override_id, + } + } + + /// Retrieves this specific search override. + pub async fn retrieve( + &self, + ) -> Result> { + let params = documents_api::GetSearchOverrideParams { + collection_name: self.collection_name.to_string(), + override_id: self.override_id.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::get_search_override(&config, params_for_move).await } + }) + .await + } + + /// Deletes this specific search override. + pub async fn delete( + &self, + ) -> Result> + { + let params = documents_api::DeleteSearchOverrideParams { + collection_name: self.collection_name.to_string(), + override_id: self.override_id.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::delete_search_override(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/collection/search_overrides.rs b/typesense/src/client/collection/search_overrides.rs new file mode 100644 index 0000000..39b1102 --- /dev/null +++ b/typesense/src/client/collection/search_overrides.rs @@ -0,0 +1,70 @@ +//! Provides access to the API endpoints for managing a collection's search overrides. +//! +//! An instance of `SearchOverrides` is created via the `Client::collection("collection_name").search_overrides()` method. 
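+//!
+//! # Example
+//!
+//! An illustrative sketch (not compiled as a doc-test), assuming a configured `client`:
+//!
+//! ```ignore
+//! // List every override defined on the collection.
+//! let overrides = client.collection("books").search_overrides().list().await?;
+//!
+//! // Remove a single override by its ID.
+//! client.collection("books").search_override("featured-items").delete().await?;
+//! ```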
+ +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, documents_api}, + models, +}; + +/// Provides methods for interacting with a collection of search overrides. +/// +/// This struct is created by calling `client.collection("collection_name").overrides()`. +pub struct SearchOverrides<'a> { + pub(super) client: &'a Client, + pub(super) collection_name: &'a str, +} + +impl<'a> SearchOverrides<'a> { + /// Creates a new `Overrides` instance. + pub(super) fn new(client: &'a Client, collection_name: &'a str) -> Self { + Self { + client, + collection_name, + } + } + + /// Creates or updates a search override. + /// + /// Overrides allow you to rank certain documents higher than others for specific queries. + /// + /// # Arguments + /// * `override_id` - The ID of the search override to create or update. + /// * `schema` - The `SearchOverrideSchema` defining the override rules. + pub async fn upsert( + &self, + override_id: &str, + schema: models::SearchOverrideSchema, + ) -> Result> { + let params = documents_api::UpsertSearchOverrideParams { + collection_name: self.collection_name.to_string(), + override_id: override_id.to_string(), + search_override_schema: schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::upsert_search_override(&config, params_for_move).await } + }) + .await + } + + /// Lists all search overrides associated with the collection. + pub async fn list( + &self, + ) -> Result> + { + let params = documents_api::GetSearchOverridesParams { + collection_name: self.collection_name.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { documents_api::get_search_overrides(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/collection/synonym.rs b/typesense/src/client/collection/synonym.rs new file mode 100644 index 0000000..d3b9b05 --- /dev/null +++ b/typesense/src/client/collection/synonym.rs @@ -0,0 +1,65 @@ +//! Provides access to the API endpoints for managing a single search synonym. +//! +//! An instance of `Synonym` is created via the `client.collection("collection_name").synonym("synonym_id")` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, synonyms_api}, + models, +}; + +/// Provides methods for interacting with a specific search synonym. +/// +/// This struct is created by calling `client.collection("collection_name").synonym("synonym_id")`. +pub struct Synonym<'a> { + pub(super) client: &'a Client, + pub(super) collection_name: &'a str, + pub(super) synonym_id: &'a str, +} + +impl<'a> Synonym<'a> { + /// Creates a new `Synonym` instance for a specific synonym ID. + pub(super) fn new(client: &'a Client, collection_name: &'a str, synonym_id: &'a str) -> Self { + Self { + client, + collection_name, + synonym_id, + } + } + + /// Retrieves this specific search synonym. + pub async fn get( + &self, + ) -> Result> { + let params = synonyms_api::GetSearchSynonymParams { + collection_name: self.collection_name.to_string(), + synonym_id: self.synonym_id.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { synonyms_api::get_search_synonym(&config, params_for_move).await } + }) + .await + } + + /// Deletes this specific search synonym. 
+ pub async fn delete( + &self, + ) -> Result> + { + let params = synonyms_api::DeleteSearchSynonymParams { + collection_name: self.collection_name.to_string(), + synonym_id: self.synonym_id.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { synonyms_api::delete_search_synonym(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/collection/synonyms.rs b/typesense/src/client/collection/synonyms.rs new file mode 100644 index 0000000..a78300b --- /dev/null +++ b/typesense/src/client/collection/synonyms.rs @@ -0,0 +1,67 @@ +//! Provides access to the API endpoints for managing a collection's search synonyms. +//! +//! An instance of `Synonyms` is created via the `client.collection("collection_name").synonyms()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, synonyms_api}, + models, +}; + +/// Provides methods for interacting with a collection of search synonyms. +/// +/// This struct is created by calling `client.collection("collection_name").synonyms()`. +pub struct Synonyms<'a> { + pub(super) client: &'a Client, + pub(super) collection_name: &'a str, +} + +impl<'a> Synonyms<'a> { + /// Creates a new `Synonyms` instance. + pub(super) fn new(client: &'a Client, collection_name: &'a str) -> Self { + Self { + client, + collection_name, + } + } + + /// Creates or updates a search synonym. + /// + /// # Arguments + /// * `synonym_id` - The ID of the search synonym to create or update. + /// * `schema` - A `SearchSynonymSchema` object defining the equivalent terms. + pub async fn upsert( + &self, + synonym_id: &str, + schema: models::SearchSynonymSchema, + ) -> Result> { + let params = synonyms_api::UpsertSearchSynonymParams { + collection_name: self.collection_name.to_string(), + synonym_id: synonym_id.to_string(), + search_synonym_schema: schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { synonyms_api::upsert_search_synonym(&config, params_for_move).await } + }) + .await + } + + /// Retrieve all search synonyms associated with the collection. + pub async fn retrieve( + &self, + ) -> Result> { + let params = synonyms_api::GetSearchSynonymsParams { + collection_name: self.collection_name.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { synonyms_api::get_search_synonyms(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/collections.rs b/typesense/src/client/collections.rs index cf9a6b0..7c2846f 100644 --- a/typesense/src/client/collections.rs +++ b/typesense/src/client/collections.rs @@ -1,26 +1,141 @@ -use super::Client; // Use the parent module's Client -use typesense_codegen::apis::{collections_api, Error}; -use typesense_codegen::models::{CollectionResponse, CollectionSchema}; - -// This struct holds a temporary reference to the main client. -// The lifetime parameter `'c` ensures it cannot outlive the Client it borrows from. -pub struct Collections<'c> { - pub client: &'c Client, +//! Provides access to the collection and alias-related API endpoints. +//! +//! A `Collections` instance is created via the main `client.collections()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{collections_api, configuration}, + models, +}; + +/// Provides methods for interacting with Typesense collections and aliases. 
+/// +/// This struct is created by calling `client.collections()`. +pub struct Collections<'a> { + pub(super) client: &'a Client, } -// Implement the public methods on the Collections struct. -impl<'c> Collections<'c> { - /// Retrieve the details of a collection, given its name. - pub async fn get(&self, collection_name: &str) -> Result> { - // It calls back to the generic helper method on the main client. - let path = format!("/collections/{}", collection_name); - self.client.get(&path, None).await +impl<'a> Collections<'a> { + /// Creates a new `Collection` instance + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + // --- Collection-Specific Methods --- + + /// Creates a new collection with the given schema. + /// + /// When a collection is created, you give it a name and describe the fields + /// that will be indexed from the documents added to the collection. + /// + /// # Arguments + /// * `schema` - A `CollectionSchema` object describing the collection to be created. + pub async fn create( + &self, + schema: models::CollectionSchema, + ) -> Result> { + let params = collections_api::CreateCollectionParams { + collection_schema: schema, + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::create_collection(&config, params_for_move).await } + }) + .await + } + + /// Returns a summary of all collections in the Typesense cluster. + /// + /// The collections are returned sorted by creation date, with the most + /// recent collections appearing first. + pub async fn list_all( + &self, + ) -> Result, Error> { + self.client + .execute(|config: Arc| async move { + collections_api::get_collections(&config).await + }) + .await } - /// When a collection is created, we give it a name and describe the fields. - pub async fn create(&self, schema: &CollectionSchema) -> Result> { - self.client.post("/collections", schema, None).await + // --- Alias-Specific Methods --- + + /// Creates or updates a collection alias. + /// + /// An alias is a virtual collection name that points to a real collection. + /// Aliases are useful when you want to re-index your data in the background + /// on a new collection and then switch your application to it without any + /// changes to your code. + /// + /// # Arguments + /// * `name` - The name of the alias to create or update. + /// * `schema` - A `CollectionAliasSchema` pointing to the target collection. + pub async fn upsert_alias( + &self, + name: &str, + schema: models::CollectionAliasSchema, + ) -> Result> { + let params = collections_api::UpsertAliasParams { + alias_name: name.to_string(), + collection_alias_schema: Some(schema), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::upsert_alias(&config, params_for_move).await } + }) + .await + } + + /// Retrieves the details of a collection alias, including the collection it points to. + /// + /// # Arguments + /// * `name` - The name of the alias to retrieve. + pub async fn get_alias( + &self, + name: &str, + ) -> Result> { + let params = collections_api::GetAliasParams { + alias_name: name.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::get_alias(&config, params_for_move).await } + }) + .await } - // ... all other collection-related methods go here ... + /// Lists all aliases and the corresponding collections that they map to. 
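+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch (not compiled as a doc-test); it assumes a configured
+    /// `client`, and that the generated response models expose `aliases`, `name`,
+    /// and `collection_name` fields:
+    ///
+    /// ```ignore
+    /// let response = client.collections().list_aliases().await?;
+    /// for alias in response.aliases {
+    ///     println!("{} -> {}", alias.name, alias.collection_name);
+    /// }
+    /// ```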
+ pub async fn list_aliases( + &self, + ) -> Result> { + self.client + .execute(|config: Arc| async move { + collections_api::get_aliases(&config).await + }) + .await + } + + /// Deletes a collection alias. + /// + /// # Arguments + /// * `name` - The name of the alias to delete. + pub async fn delete_alias( + &self, + name: &str, + ) -> Result> { + let params = collections_api::DeleteAliasParams { + alias_name: name.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::delete_alias(&config, params_for_move).await } + }) + .await + } } diff --git a/typesense/src/client/conversations/mod.rs b/typesense/src/client/conversations/mod.rs new file mode 100644 index 0000000..392070b --- /dev/null +++ b/typesense/src/client/conversations/mod.rs @@ -0,0 +1,41 @@ +//! Provides access to the API endpoints for managing conversation models. +//! +//! An `Conversations` instance is created via the main `Client::conversations()` method. + +use super::Client; +pub use model::Model; +pub use models::Models; + +pub mod model; +pub mod models; + +/// Provides methods for managing Typesense conversation models. +/// +/// This struct is created by calling `client.conversations()`. +pub struct Conversations<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Conversations<'a> { + /// Creates a new `Conversations` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Provides access to endpoints for managing the collection of conversation models. + /// + /// Example: `client.conversations().models().list().await` + pub fn models(&self) -> Models<'a> { + Models::new(self.client) + } + + /// Provides access to endpoints for managing a single conversation model. + /// + /// # Arguments + /// * `model_id` - The ID of the conversation model to manage. + /// + /// Example: `client.conversations().model("...").get().await` + pub fn model(&self, model_id: &'a str) -> Model<'a> { + Model::new(self.client, model_id) + } +} diff --git a/typesense/src/client/conversations/model.rs b/typesense/src/client/conversations/model.rs new file mode 100644 index 0000000..79540bb --- /dev/null +++ b/typesense/src/client/conversations/model.rs @@ -0,0 +1,90 @@ +//! Provides access to the API endpoints for managing a single conversation model. +//! +//! An instance of `Model` is created via the `Conversations::model()` method. + +use crate::client::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, conversations_api}, + models, +}; + +/// Provides methods for interacting with a specific conversation model. +/// +/// This struct is created by calling `client.conversations().model("model_id")`. +pub struct Model<'a> { + pub(super) client: &'a Client, + pub(super) model_id: &'a str, +} + +impl<'a> Model<'a> { + /// Creates a new `Model` instance for a specific model ID. + pub(super) fn new(client: &'a Client, model_id: &'a str) -> Self { + Self { client, model_id } + } + + /// Retrieves the details of this specific conversation model. + pub async fn retrieve( + &self, + ) -> Result< + models::ConversationModelSchema, + Error, + > { + let params = conversations_api::RetrieveConversationModelParams { + model_id: self.model_id.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { + conversations_api::retrieve_conversation_model(&config, params_for_move).await + } + }) + .await + } + + /// Updates this specific conversation model. 
+ /// + /// # Arguments + /// * `schema` - A `ConversationModelUpdateSchema` object with the fields to update. + pub async fn update( + &self, + schema: models::ConversationModelUpdateSchema, + ) -> Result< + models::ConversationModelSchema, + Error, + > { + let params = conversations_api::UpdateConversationModelParams { + model_id: self.model_id.to_string(), + conversation_model_update_schema: schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { + conversations_api::update_conversation_model(&config, params_for_move).await + } + }) + .await + } + + /// Deletes this specific conversation model. + pub async fn delete( + &self, + ) -> Result< + models::ConversationModelSchema, + Error, + > { + let params = conversations_api::DeleteConversationModelParams { + model_id: self.model_id.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { + conversations_api::delete_conversation_model(&config, params_for_move).await + } + }) + .await + } +} diff --git a/typesense/src/client/conversations/models.rs b/typesense/src/client/conversations/models.rs new file mode 100644 index 0000000..d4b585d --- /dev/null +++ b/typesense/src/client/conversations/models.rs @@ -0,0 +1,62 @@ +//! Provides access to the API endpoints for managing conversation models. +//! +//! An instance of `Models` is created via the `Conversations::models()` method. + +use crate::client::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, conversations_api}, + models, +}; + +/// Provides methods for creating and listing conversation models. +/// +/// This struct is created by calling `conversations.models()`. +pub struct Models<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Models<'a> { + /// Creates a new `Models` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Creates a new conversation model. + /// + /// # Arguments + /// * `schema` - A `ConversationModelCreateSchema` object describing the model. + pub async fn create( + &self, + schema: models::ConversationModelCreateSchema, + ) -> Result< + models::ConversationModelSchema, + Error, + > { + let params = conversations_api::CreateConversationModelParams { + conversation_model_create_schema: schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { + conversations_api::create_conversation_model(&config, params_for_move).await + } + }) + .await + } + + /// Retrieves a summary of all conversation models. + pub async fn retrieve( + &self, + ) -> Result< + Vec, + Error, + > { + self.client + .execute(|config: Arc| async move { + conversations_api::retrieve_all_conversation_models(&config).await + }) + .await + } +} diff --git a/typesense/src/client/documents.rs b/typesense/src/client/documents.rs deleted file mode 100644 index e69de29..0000000 diff --git a/typesense/src/client/key.rs b/typesense/src/client/key.rs new file mode 100644 index 0000000..dcd33ef --- /dev/null +++ b/typesense/src/client/key.rs @@ -0,0 +1,56 @@ +//! Provides access to the API endpoints for managing a single API key. +//! +//! A `Key` instance is created via the `Client::key(key_id)` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, keys_api}, + models, +}; + +/// Provides methods for managing a specific Typesense API key. +/// +/// This struct is created by calling `client.key(key_id)`. 
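+///
+/// # Example
+///
+/// An illustrative sketch (not compiled as a doc-test); it assumes a configured
+/// `client` and an existing key with ID `123`:
+///
+/// ```ignore
+/// // Only the key prefix and metadata are returned, never the full key value.
+/// let key = client.key(123).retrieve().await?;
+///
+/// // Revoke the key once it is no longer needed.
+/// client.key(123).delete().await?;
+/// ```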
+pub struct Key<'a> { + pub(super) client: &'a Client, + pub(super) key_id: i64, +} + +impl<'a> Key<'a> { + /// Creates a new `Key` instance for a specific key ID. + pub(super) fn new(client: &'a Client, key_id: i64) -> Self { + Self { client, key_id } + } + + /// Retrieves metadata about this specific API key. + /// + /// For security reasons, this endpoint only returns the key prefix and metadata, + /// not the full key value. + pub async fn retrieve(&self) -> Result> { + let params = keys_api::GetKeyParams { + key_id: self.key_id, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { keys_api::get_key(&config, params_for_move).await } + }) + .await + } + + /// Deletes this specific API key. + pub async fn delete( + &self, + ) -> Result> { + let params = keys_api::DeleteKeyParams { + key_id: self.key_id, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { keys_api::delete_key(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/keys.rs b/typesense/src/client/keys.rs new file mode 100644 index 0000000..004ae7c --- /dev/null +++ b/typesense/src/client/keys.rs @@ -0,0 +1,55 @@ +//! Provides access to the API endpoints for managing the collection of API keys. +//! +//! An `Keys` instance is created via the `Client::keys()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, keys_api}, + models, +}; + +/// Provides methods for managing a collection of Typesense API keys. +/// +/// This struct is created by calling `client.keys()`. +pub struct Keys<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Keys<'a> { + /// Creates a new `Keys` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Creates a new API key with fine-grained access control. + /// + /// You can restrict access on a per-collection and per-action level. + /// The full, unhashed key is only returned on creation. + /// + /// # Arguments + /// * `schema` - An `ApiKeySchema` object describing the key's permissions. + pub async fn create( + &self, + schema: models::ApiKeySchema, + ) -> Result> { + let params = keys_api::CreateKeyParams { + api_key_schema: Some(schema), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { keys_api::create_key(&config, params_for_move).await } + }) + .await + } + + /// Lists all API keys and their metadata. + pub async fn retrieve(&self) -> Result> { + self.client + .execute(|config: Arc| async move { + keys_api::get_keys(&config).await + }) + .await + } +} diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index cb27285..55e782e 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -1,144 +1,429 @@ -// in src/client/mod.rs +//! # A batteries-included, multi-node-aware client for the Typesense API. +//! +//! This module provides the main `Client` for interacting with a Typesense cluster. +//! It is designed for resilience and ease of use, incorporating features like +//! automatic failover, health checks, and a structured, ergonomic API. +//! +//! ## Key Features: +//! - **Multi-Node Operation**: Automatically manages connections to multiple Typesense nodes. +//! - **Health Checks & Failover**: Monitors node health and seamlessly fails over to healthy nodes upon encountering server or network errors. +//! 
- **Nearest Node Priority**: Can be configured to always prioritize a specific "nearest" node to reduce latency. +//! - **Fluent, Namespaced API**: Operations are grouped into logical namespaces like `.collections()`, `.documents("books")`, and `.operations()`, making the API discoverable and easy to use. +//! - **Built-in Retries**: Handles transient network errors with an exponential backoff policy for each node. +//! +//! ## Example Usage +//! +//! ```no_run +//! use typesense_client::client::{Client, MultiNodeConfiguration}; +//! use typesense_codegen::models; +//! use reqwest::Url; +//! use reqwest_retry::policies::ExponentialBackoff; +//! use std::time::Duration; +//! +//! #[tokio::main] +//! async fn main() -> Result<(), Box> { +//! let config = MultiNodeConfiguration { +//! nodes: vec![Url::parse("http://localhost:8108")?], +//! nearest_node: None, +//! api_key: "your-api-key".to_string(), +//! healthcheck_interval: Duration::from_secs(60), +//! retry_policy: ExponentialBackoff::builder().build_with_max_retries(3), +//! connection_timeout: Duration::from_secs(10), +//! }; +//! +//! let client = Client::new(config)?; +//! +//! // Retrieve details for a collection +//! let collection = client.collections().get("products").await?; +//! println!("Collection Name: {}", collection.name); +//! +//! // Search for a document +//! let search_params = models::SearchCollectionParams { +//! q: "phone".to_string(), +//! query_by: "name".to_string(), +//! ..Default::default() +//! }; +//! let search_results = client.documents("products").search(search_params).await?; +//! println!("Found {} hits.", search_results.found.unwrap_or(0)); +//! +//! Ok(()) +//! } +//! ``` -// Make the sub-modules public within the client module +pub mod analytics; +pub mod collection; pub mod collections; -pub mod documents; +pub mod conversations; +pub mod key; +pub mod keys; +pub mod multi_search; +pub mod operations; +pub mod preset; +pub mod presets; +pub mod stemming; +pub mod stopword; +pub mod stopwords; -// Re-export the namespace structs for easier access +pub use analytics::Analytics; +pub use collection::Collection; pub use collections::Collections; -pub use documents::Documents; +pub use conversations::Conversations; +pub use key::Key; +pub use keys::Keys; +pub use operations::Operations; +pub use preset::Preset; +pub use presets::Presets; +pub use stemming::Stemming; +pub use stopword::Stopword; +pub use stopwords::Stopwords; use reqwest::Url; -use reqwest_middleware::{ClientBuilder, ClientWithMiddleware}; -use reqwest_retry::policies::ExponentialBackoff; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::time::Duration; -use typesense_codegen::apis::Error; // Use the generated Error type +use reqwest_middleware::ClientBuilder; +use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; +use std::future::Future; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, Mutex, +}; +use std::time::{Duration, Instant}; +use thiserror::Error; +use typesense_codegen::apis::{self, configuration}; -pub mod collections; -// Public configuration for the user +// --- Internal Node Health Struct --- +// This is an internal detail to track the state of each node. +#[derive(Debug)] +struct Node { + url: Url, + is_healthy: bool, + last_access_timestamp: Instant, +} + +/// Configuration for the multi-node Typesense client. +#[derive(Clone, Debug)] pub struct MultiNodeConfiguration { + /// A list of all nodes in the Typesense cluster. 
pub nodes: Vec, + /// An optional, preferred node to try first for every request. Ideal for reducing latency. + pub nearest_node: Option, + /// The Typesense API key used for authentication. pub api_key: String, + /// The duration after which an unhealthy node will be retried for requests. + pub healthcheck_interval: Duration, + /// The retry policy for transient network errors on a *single* node. pub retry_policy: ExponentialBackoff, + /// The timeout for each individual network request. pub connection_timeout: Duration, } -// The main public client +/// The primary error type for the Typesense client. +#[derive(Debug, Error)] +pub enum Error +where + E: std::fmt::Debug, + apis::Error: std::fmt::Display + std::fmt::Debug, +{ + /// Indicates that all configured nodes failed to process a request. + /// The source contains the last error received. + #[error("All API nodes failed to respond.")] + AllNodesFailed(#[source] Box>), + + /// A network-level error occurred within the `reqwest` middleware stack (e.g., a connection timeout). + #[error("A single node failed with a middleware error")] + Middleware(#[from] reqwest_middleware::Error), + + /// An API-level error returned by the Typesense server (e.g., 404 Not Found, 400 Bad Request). + #[error("A single node failed with an API error")] + Api(#[from] apis::Error), +} + +/// The main entry point for all interactions with the Typesense API. +/// +/// The client manages connections to multiple nodes and provides access to different +/// API resource groups (namespaces) like `collections`, `documents`, and `operations`. +#[derive(Debug)] pub struct Client { - config: MultiNodeConfiguration, - http_client: ClientWithMiddleware, // The client that handles retries on a single node + // The Client now holds the stateful Node list. + nodes: Vec>>, + nearest_node: Option>>, + api_key: String, + healthcheck_interval: Duration, + retry_policy: ExponentialBackoff, + connection_timeout: Duration, current_node_index: AtomicUsize, } -// In the `impl Client` block + impl Client { - pub fn new(config: MultiNodeConfiguration) -> Self { - let http_client = ClientBuilder::new( - reqwest::Client::builder() - .timeout(config.connection_timeout) - .build() - .expect("Failed to build reqwest client"), - ) - // The retry middleware will handle transient errors for a SINGLE request - .with(reqwest_retry::RetryTransientMiddleware::new_with_policy(config.retry_policy.clone())) - .build(); - - Self { - config, - http_client, - current_node_index: AtomicUsize::new(0), + /// Creates a new `Client` with the given configuration. + /// + /// Returns an error if the configuration contains no nodes. 
+ pub fn new(config: MultiNodeConfiguration) -> Result { + if config.nodes.is_empty() && config.nearest_node.is_none() { + return Err("Configuration must include at least one node or a nearest_node."); } - } - // Simple round-robin node selection for trying the next node on failure - fn get_next_node(&self) -> &Url { - let index = self.current_node_index.fetch_add(1, Ordering::Relaxed); - &self.config.nodes[index % self.config.nodes.len()] + let nodes = config + .nodes + .into_iter() + .map(|url| { + Arc::new(Mutex::new(Node { + url, + is_healthy: true, + last_access_timestamp: Instant::now(), + })) + }) + .collect(); + + let nearest_node = config.nearest_node.map(|url| { + Arc::new(Mutex::new(Node { + url, + is_healthy: true, + last_access_timestamp: Instant::now(), + })) + }); + + Ok(Self { + nodes, + nearest_node, + api_key: config.api_key, + healthcheck_interval: config.healthcheck_interval, + retry_policy: config.retry_policy, + connection_timeout: config.connection_timeout, + current_node_index: AtomicUsize::new(0), + }) } - /// A generic POST request handler. - /// It tries each node in sequence if the previous one fails with a retriable error. - async fn post( - &self, - path: &str, - body: &T, - // We accept optional query params now - query_params: Option<&[(&str, String)]>, - ) -> Result> - where - T: serde::Serialize + ?Sized, - U: for<'de> serde::Deserialize<'de>, - E: for<'de> serde::Deserialize<'de> + std::fmt::Debug, - { - self.execute_request(reqwest::Method::POST, path, Some(body), query_params).await + /// Selects the next node to use for a request based on health and priority. + fn get_next_node(&self) -> Arc> { + // 1. Always try the nearest_node first if it exists. + if let Some(nearest_node_arc) = &self.nearest_node { + let node = nearest_node_arc.lock().unwrap(); + let is_due_for_check = Instant::now().duration_since(node.last_access_timestamp) + >= self.healthcheck_interval; + + if node.is_healthy || is_due_for_check { + return Arc::clone(nearest_node_arc); + } + } + + // 2. Fallback to the main list of nodes if no healthy nearest_node is available. + if self.nodes.is_empty() { + // This can only happen if ONLY a nearest_node was provided and it's unhealthy. + // We must return it to give it a chance to recover. + return Arc::clone(self.nearest_node.as_ref().unwrap()); + } + + // 3. Loop through all nodes once to find a healthy one. + for _ in 0..self.nodes.len() { + let index = self.current_node_index.fetch_add(1, Ordering::Relaxed) % self.nodes.len(); + let node_arc = &self.nodes[index]; + let node = node_arc.lock().unwrap(); + let is_due_for_check = Instant::now().duration_since(node.last_access_timestamp) + >= self.healthcheck_interval; + + if node.is_healthy || is_due_for_check { + return Arc::clone(node_arc); + } + } + + // 4. If all nodes are unhealthy and not due for a check, just pick the next one in the round-robin. + // This gives it a chance to prove it has recovered. + let index = self.current_node_index.load(Ordering::Relaxed) % self.nodes.len(); + Arc::clone(&self.nodes[index]) } - // You would create similar `get`, `delete`, and `patch` helpers - async fn get(&self, path: &str, query_params: Option<&[(&str, String)]>) -> Result> - where - U: for<'de> serde::Deserialize<'de>, - E: for<'de> serde::Deserialize<'de> + std::fmt::Debug, - { - self.execute_request::(reqwest::Method::GET, path, None, query_params).await + /// Sets the health status of a given node after a request attempt. 
+ fn set_node_health(&self, node_arc: &Arc>, is_healthy: bool) { + let mut node = node_arc.lock().unwrap(); + node.is_healthy = is_healthy; + node.last_access_timestamp = Instant::now(); } - /// The single, generic request executor containing all the logic. - async fn execute_request(&self, method: reqwest::Method, path: &str, body: Option<&T>, query_params: Option<&[(&str, String)]>) -> Result> + /// The core execution method that handles multi-node failover and retries. + /// This internal method is called by all public API methods. + pub(super) async fn execute(&self, api_call: F) -> Result> where - T: serde::Serialize + ?Sized, - U: for<'de> serde::Deserialize<'de>, - E: for<'de> serde::Deserialize<'de> + std::fmt::Debug, + F: Fn(Arc) -> Fut, + Fut: Future>>, + E: std::fmt::Debug, + apis::Error: std::fmt::Display + std::fmt::Debug, { let mut last_error: Option> = None; + let num_nodes_to_try = self.nodes.len() + self.nearest_node.is_some() as usize; - for _ in 0..self.config.nodes.len() { - let node_url = self.get_next_node(); - let full_url = format!("{}{}", node_url.as_str().trim_end_matches('/'), path); + // Loop up to the total number of available nodes. + for _ in 0..num_nodes_to_try { + let node_arc = self.get_next_node(); + let node_url = { + // Lock is held for a very short duration. + let node = node_arc.lock().unwrap(); + node.url.clone() + }; - let mut request_builder = self.http_client.request(method.clone(), &full_url).header("X-TYPESENSE-API-KEY", &self.config.api_key); + // This client handles transient retries (e.g. network blips) on the *current node*. + let http_client = ClientBuilder::new( + reqwest::Client::builder() + .timeout(self.connection_timeout) + .build() + .expect("Failed to build reqwest client"), + ) + .with(RetryTransientMiddleware::new_with_policy( + self.retry_policy.clone(), + )) + .build(); - if let Some(body) = body { - request_builder = request_builder.json(body); - } - - if let Some(params) = query_params { - request_builder = request_builder.query(params); - } + // Create a temporary, single-node config for the generated API function. + let gen_config = Arc::new(configuration::Configuration { + base_path: node_url + .to_string() + .strip_suffix('/') + .unwrap_or(node_url.as_str()) + .to_string(), + api_key: Some(configuration::ApiKey { + prefix: None, + key: self.api_key.clone(), + }), + client: http_client, + ..Default::default() + }); - match request_builder.send().await { + match api_call(gen_config).await { Ok(response) => { - // If the request was successful, parse the response and return. - return Self::handle_response(response).await; + self.set_node_health(&node_arc, true); // Mark as healthy on success. + return Ok(response); } Err(e) => { - // This error is from the reqwest-middleware layer, likely a connection - // error or because all retries on this single node were exhausted. - // We'll log it and try the next node. - eprintln!("Request to node {} failed: {}. Trying next node.", node_url, e); - last_error = Some(Error::Middleware(e)); + let wrapped_error: Error = e.into(); + if is_retriable(&wrapped_error) { + self.set_node_health(&node_arc, false); // Mark as unhealthy on retriable error. + last_error = Some(wrapped_error); + // Continue loop to try the next node. + } else { + // Non-retriable error (e.g., 404 Not Found), fail fast. + return Err(wrapped_error); + } } } } - // If all nodes have been tried and failed, return the last error. 
- Err(last_error.expect("No nodes were available to try")) + + // If the loop finishes, all nodes have failed. + Err(Error::AllNodesFailed(Box::new(last_error.expect( + "No nodes were available to try, or all errors were non-retriable.", + )))) } - /// Generic response handler adapted from the generated code. - /// This parses a success response or a typed error response. - async fn handle_response(resp: reqwest::Response) -> Result> - where - U: for<'de> serde::Deserialize<'de>, - E: for<'de> serde::Deserialize<'de>, - { - let status = resp.status(); - let content = resp.text().await.map_err(Error::Reqwest)?; - - if status.is_success() { - serde_json::from_str(&content).map_err(Error::Serde) - } else { - let entity: Option = serde_json::from_str(&content).ok(); - let error = typesense_codegen::apis::ResponseContent { status, content, entity }; - Err(Error::ResponseError(error)) - } + /// Provides access to API endpoints for managing collections like `create()` and `retrieve()`. + pub fn collections(&self) -> collections::Collections<'_> { + collections::Collections::new(self) + } + + /// Provides access to API endpoints of a specific collection. + pub fn collection<'a>(&'a self, collection_name: &'a str) -> Collection<'a> { + Collection::new(self, collection_name) + } + + /// Provides access to the analytics-related API endpoints. + pub fn analytics(&self) -> Analytics<'_> { + Analytics::new(self) + } + + /// Returns a `Conversations` instance for managing conversation models. + pub fn conversations(&self) -> Conversations { + Conversations::new(self) + } + + /// Provides access to top-level, non-namespaced API endpoints like `health` and `debug`. + pub fn operations(&self) -> Operations<'_> { + Operations::new(self) + } + + /// Provides access to endpoints for managing the collection of API keys. + /// + /// Example: `client.keys().create(schema).await` + pub fn keys(&self) -> Keys<'_> { + Keys::new(self) + } + + /// Provides access to endpoints for managing a single API key. + /// + /// # Arguments + /// * `key_id` - The ID of the key to manage. + /// + /// Example: `client.key(123).delete().await` + pub fn key(&self, key_id: i64) -> Key<'_> { + Key::new(self, key_id) + } + + /// Provides access to endpoints for managing all of your presets. + /// + /// # Example + /// ``` + /// client.presets().list().await?; + /// ``` + pub fn presets(&self) -> Presets { + Presets::new(self) + } + + /// Provides access to endpoints for managing a single preset. + /// + /// # Arguments + /// * `preset_id` - The ID of the preset to manage. + /// + /// # Example + /// ``` + /// client.preset("my-preset").retrieve().await?; + /// ``` + pub fn preset<'a>(&'a self, preset_id: &'a str) -> Preset<'a> { + Preset::new(self, preset_id) + } + + /// Provides access to the stemming-related API endpoints. + /// + /// # Example + /// + /// ```no_run + /// client.stemming().dictionaries().retrieve().await?; + /// ``` + pub fn stemming(&self) -> Stemming { + Stemming::new(self) + } + + // --- Stopwords Accessors --- + + /// Provides access to endpoints for managing the collection of stopwords sets. + /// + /// Example: `client.stopwords().retrieve().await` + pub fn stopwords(&self) -> Stopwords<'_> { + Stopwords::new(self) + } + + /// Provides access to endpoints for managing a single stopwords set. + /// + /// # Arguments + /// * `set_id` - The ID of the stopwords set to manage. 
+ /// + /// Example: `client.stopword("common_words").retrieve().await` + pub fn stopword<'a>(&'a self, set_id: &'a str) -> Stopword<'a> { + Stopword::new(self, set_id) + } +} + +/// A helper function to determine if an error is worth retrying on another node. +fn is_retriable(error: &Error) -> bool +where + E: std::fmt::Debug, + apis::Error: std::fmt::Display + std::fmt::Debug, +{ + match error { + // Network-level errors from middleware are always retriable. + Error::Middleware(_) => true, + Error::Api(api_err) => match api_err { + // Server-side errors (5xx) indicate a problem with the node, so we should try another. + apis::Error::ResponseError(content) => content.status.is_server_error(), + // Underlying reqwest errors (e.g. connection refused) are retriable. + apis::Error::Reqwest(_) => true, + // Client-side (4xx) or parsing errors are not retriable as the request is likely invalid. + _ => false, + }, + Error::AllNodesFailed(_) => false, } } diff --git a/typesense/src/client/multi_search.rs b/typesense/src/client/multi_search.rs new file mode 100644 index 0000000..33999d4 --- /dev/null +++ b/typesense/src/client/multi_search.rs @@ -0,0 +1,119 @@ +//! Provides access to the API endpoints for Multi Search. +//! +//! A `MultiSearch` instance is created via the main `Client::multi_search()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{ + configuration::Configuration, + documents_api::{self, MultiSearchParams}, + }, + models, // The generated model structs +}; + +/// Provides methods for managing Typesense API keys. +/// +/// This struct is created by calling `client.keys()`. +pub struct MultiSearch<'a> { + pub(super) client: &'a Client, +} + +impl<'a> MultiSearch<'a> { + /// Make multiple search requests in a single HTTP request to avoid round-trip network latencies. + /// + /// You can use it in two different modes: + + /// - Federated search: each search request in the multi-search payload returns results as independently. + /// The results vector in the `multi_search` response is guaranteed to be in the same order as the queries you send in the `searches` vector in your request. + /// - Union search: the response of each search request is merged into a single unified order. + /// + /// # Arguments + /// * `search_requests` - A `MultiSearchSearchesParameter` contain multiple search requests, this will be sent in the request body. + /// * `common_search_params` - A `MultiSearchParameters` describing search parameters that are common to all searches, these will be sent as URL query parameters. 
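+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch (not compiled as a doc-test); it assumes the
+    /// `Client::multi_search()` accessor described in this module, and that the
+    /// generated `MultiSearchSearchesParameter` / `MultiSearchCollectionParameters` /
+    /// `MultiSearchParameters` models derive `Default` and use these field names:
+    ///
+    /// ```ignore
+    /// let searches = models::MultiSearchSearchesParameter {
+    ///     searches: vec![models::MultiSearchCollectionParameters {
+    ///         collection: Some("books".to_string()),
+    ///         q: Some("harry".to_string()),
+    ///         query_by: Some("title".to_string()),
+    ///         ..Default::default()
+    ///     }],
+    ///     ..Default::default()
+    /// };
+    /// // No search parameters are shared across the individual searches here.
+    /// let common = models::MultiSearchParameters::default();
+    /// let results = client.multi_search().perform(searches, common).await?;
+    /// ```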
+ pub async fn perform( + &self, + search_requests: models::MultiSearchSearchesParameter, + common_search_params: models::MultiSearchParameters, + ) -> Result> { + let params = common_search_params; + let multi_search_params = MultiSearchParams { + // enable_highlight_v1: None, + // max_candidates: None, + // max_filter_by_candidates: None, + // split_join_tokens: None, + multi_search_searches_parameter: Some(search_requests), + + // Common URL search params + cache_ttl: params.cache_ttl, + conversation: params.conversation, + conversation_id: params.conversation_id, + conversation_model_id: params.conversation_model_id, + drop_tokens_mode: params.drop_tokens_mode, + drop_tokens_threshold: params.drop_tokens_threshold, + enable_overrides: params.enable_overrides, + enable_synonyms: params.enable_synonyms, + enable_typos_for_alpha_numerical_tokens: params.enable_typos_for_alpha_numerical_tokens, + enable_typos_for_numerical_tokens: params.enable_typos_for_numerical_tokens, + exclude_fields: params.exclude_fields, + exhaustive_search: params.exhaustive_search, + facet_by: params.facet_by, + facet_query: params.facet_query, + facet_return_parent: params.facet_return_parent, + facet_strategy: params.facet_strategy, + filter_by: params.filter_by, + filter_curated_hits: params.filter_curated_hits, + group_by: params.group_by, + group_limit: params.group_limit, + group_missing_values: params.group_missing_values, + hidden_hits: params.hidden_hits, + highlight_affix_num_tokens: params.highlight_affix_num_tokens, + highlight_end_tag: params.highlight_end_tag, + highlight_fields: params.highlight_fields, + highlight_full_fields: params.highlight_full_fields, + highlight_start_tag: params.highlight_start_tag, + include_fields: params.include_fields, + infix: params.infix, + limit: params.limit, + max_extra_prefix: params.max_extra_prefix, + max_extra_suffix: params.max_extra_suffix, + max_facet_values: params.max_facet_values, + min_len_1typo: params.min_len_1typo, + min_len_2typo: params.min_len_2typo, + num_typos: params.num_typos, + offset: params.offset, + override_tags: params.override_tags, + page: params.page, + per_page: params.per_page, + pinned_hits: params.pinned_hits, + pre_segmented_query: params.pre_segmented_query, + prefix: params.prefix, + preset: params.preset, + prioritize_exact_match: params.prioritize_exact_match, + prioritize_num_matching_fields: params.prioritize_num_matching_fields, + prioritize_token_position: params.prioritize_token_position, + q: params.q, + query_by: params.query_by, + query_by_weights: params.query_by_weights, + remote_embedding_num_tries: params.remote_embedding_num_tries, + remote_embedding_timeout_ms: params.remote_embedding_timeout_ms, + search_cutoff_ms: params.search_cutoff_ms, + snippet_threshold: params.snippet_threshold, + sort_by: params.sort_by, + stopwords: params.stopwords, + synonym_num_typos: params.synonym_num_typos, + synonym_prefix: params.synonym_prefix, + text_match_type: params.text_match_type, + typo_tokens_threshold: params.typo_tokens_threshold, + use_cache: params.use_cache, + vector_query: params.vector_query, + voice_query: params.voice_query, + }; + self.client + .execute(|config: Arc| { + let params_for_move = multi_search_params.clone(); + async move { documents_api::multi_search(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/operations.rs b/typesense/src/client/operations.rs new file mode 100644 index 0000000..152022c --- /dev/null +++ b/typesense/src/client/operations.rs @@ -0,0 
+1,52 @@ +//! Provides access to top-level, non-namespaced API endpoints. +//! +//! An `Operations` instance is created via the main `Client::operations()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{ + configuration, + debug_api, + health_api, // Add this line + }, + models, +}; + +/// Provides methods for top-level, non-namespaced Typesense operations. +/// +/// This struct is created by calling `client.operations()`. +pub struct Operations<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Operations<'a> { + /// Creates a new `Operations` instance + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + /// Retrieves debugging information from a Typesense node. + /// + /// This method will try nodes in sequence according to the health policy + /// until it gets a successful response. The returned information pertains + /// to the specific node that responded successfully. + pub async fn debug(&self) -> Result> { + self.client + .execute(|config: Arc| async move { + debug_api::debug(&config).await + }) + .await + } + + /// Checks if a Typesense node is healthy and ready to accept requests. + /// + /// This method will try nodes in sequence according to the health policy + /// until it gets a successful response (`{"ok": true}`). + pub async fn health(&self) -> Result> { + self.client + .execute(|config: Arc| async move { + health_api::health(&config).await + }) + .await + } +} diff --git a/typesense/src/client/preset.rs b/typesense/src/client/preset.rs new file mode 100644 index 0000000..652f632 --- /dev/null +++ b/typesense/src/client/preset.rs @@ -0,0 +1,55 @@ +//! Provides access to the API endpoints for managing a single preset. +//! +//! A `Preset` instance is created via the main `Client::preset(id)` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, presets_api}, + models, +}; + +/// Provides methods for managing a single Typesense preset. +/// +/// This struct is created by calling `client.preset(id)`. +pub struct Preset<'a> { + pub(super) client: &'a Client, + pub(super) preset_id: &'a str, +} + +impl<'a> Preset<'a> { + /// Creates a new `Preset` instance. + pub(super) fn new(client: &'a Client, preset_id: &'a str) -> Self { + Self { client, preset_id } + } + + /// Retrieves the details of a preset, given its ID. + pub async fn retrieve( + &self, + ) -> Result> { + let params = presets_api::RetrievePresetParams { + preset_id: self.preset_id.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { presets_api::retrieve_preset(&config, params_for_move).await } + }) + .await + } + + /// Permanently deletes a preset, given its ID. + pub async fn delete( + &self, + ) -> Result> { + let params = presets_api::DeletePresetParams { + preset_id: self.preset_id.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { presets_api::delete_preset(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/presets.rs b/typesense/src/client/presets.rs new file mode 100644 index 0000000..ee82ed0 --- /dev/null +++ b/typesense/src/client/presets.rs @@ -0,0 +1,59 @@ +//! Provides access to the API endpoints for managing presets. +//! +//! Presets are a set of search parameters that can be applied to a search query by using the `preset` search parameter. +//! +//! 
A `Presets` instance is created via the main `Client::presets()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, presets_api}, + models, +}; + +/// Provides methods for managing all of your Typesense presets. +/// +/// This struct is created by calling `client.presets()`. +pub struct Presets<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Presets<'a> { + /// Creates a new `Presets` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Retrieves the details of all presets. + pub async fn retrieve( + &self, + ) -> Result> { + self.client + .execute(|config: Arc| async move { + presets_api::retrieve_all_presets(&config).await + }) + .await + } + + /// Creates or updates an existing preset. + /// + /// # Arguments + /// * `preset_id` - The ID of the preset to create or update. + /// * `schema` - A `PresetUpsertSchema` object with the preset's value. + pub async fn upsert( + &self, + preset_id: &str, + schema: models::PresetUpsertSchema, + ) -> Result> { + let params = presets_api::UpsertPresetParams { + preset_id: preset_id.to_string(), + preset_upsert_schema: schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { presets_api::upsert_preset(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/stemming/dictionaries.rs b/typesense/src/client/stemming/dictionaries.rs new file mode 100644 index 0000000..8ea1c40 --- /dev/null +++ b/typesense/src/client/stemming/dictionaries.rs @@ -0,0 +1,62 @@ +//! Provides access to the API endpoints for managing the collection of stemming dictionaries. +//! +//! A `Dictionaries` instance is created via the `Client::stemming().dictionaries()` method. + +use crate::client::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, stemming_api}, + models, +}; + +/// Provides methods for interacting with the collection of stemming dictionaries. +/// +/// This struct is created by calling `client.stemming().dictionaries()`. +pub struct Dictionaries<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Dictionaries<'a> { + /// Creates a new `Dictionaries` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Imports a stemming dictionary from a JSONL file content. + /// + /// This creates or updates a dictionary with the given ID. + /// + /// # Arguments + /// * `dictionary_id` - The ID to assign to the dictionary. + /// * `dictionary_jsonl` - A string containing the word mappings in JSONL format. + pub async fn import( + &self, + dictionary_id: &str, + dictionary_jsonl: String, + ) -> Result> { + let params = stemming_api::ImportStemmingDictionaryParams { + id: dictionary_id.to_string(), + body: dictionary_jsonl, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { stemming_api::import_stemming_dictionary(&config, params_for_move).await } + }) + .await + } + + /// Retrieves a list of all available stemming dictionaries. 
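Since `import` takes the raw JSONL content as a `String`, a short sketch of what that payload looks like may help. The `word`/`root` entry shape mirrors the `StemmingDictionaryWordsInner` model added by this regeneration, but the dictionary ID and sample words are illustrative only.

```rust
use typesense::client::Client;

// Illustrative only: the dictionary ID and word mappings are made up; the
// `word`/`root` entry shape follows the `StemmingDictionaryWordsInner` model.
async fn import_irregular_plurals(client: &Client) {
    // One JSON object per line, mapping an inflected word to its root form.
    let jsonl = r#"{"word": "people", "root": "person"}
{"word": "children", "root": "child"}"#
        .to_string();

    match client
        .stemming()
        .dictionaries()
        .import("irregular-plurals", jsonl)
        .await
    {
        Ok(imported) => println!("imported: {imported:?}"),
        Err(e) => eprintln!("import failed: {e:?}"),
    }
}
```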
+ pub async fn list( + &self, + ) -> Result< + models::ListStemmingDictionaries200Response, + Error, + > { + self.client + .execute(|config: Arc| async move { + stemming_api::list_stemming_dictionaries(&config).await + }) + .await + } +} diff --git a/typesense/src/client/stemming/dictionary.rs b/typesense/src/client/stemming/dictionary.rs new file mode 100644 index 0000000..8995e2d --- /dev/null +++ b/typesense/src/client/stemming/dictionary.rs @@ -0,0 +1,59 @@ +//! Provides access to the API endpoints for managing a single stemming dictionary. +//! +//! An instance of `Dictionary` is created via the `Client::stemming().dictionary()` method. + +use crate::client::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, stemming_api}, + models, +}; + +/// Provides methods for interacting with a specific stemming dictionary. +/// +/// This struct is created by calling `client.stemming().dictionary("dictionary_id")`. +pub struct Dictionary<'a> { + pub(super) client: &'a Client, + pub(super) dictionary_id: &'a str, +} + +impl<'a> Dictionary<'a> { + /// Creates a new `Dictionary` instance for a specific dictionary ID. + pub(super) fn new(client: &'a Client, dictionary_id: &'a str) -> Self { + Self { + client, + dictionary_id, + } + } + + /// Retrieves the details of this specific stemming dictionary. + pub async fn get( + &self, + ) -> Result> { + let params = stemming_api::GetStemmingDictionaryParams { + dictionary_id: self.dictionary_id.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { stemming_api::get_stemming_dictionary(&config, params_for_move).await } + }) + .await + } + + // Deletes this specific stemming dictionary. + // pub async fn delete( + // &self, + // ) -> Result> + // { + // let params = stemming_api::DeleteStemmingDictionaryParams { + // dictionary_id: self.dictionary_id.to_string(), + // }; + // self.client + // .execute(|config: Arc| { + // let params_for_move = params.clone(); + // async move { stemming_api::delete_stemming_dictionary(&config, params_for_move).await } + // }) + // .await + // } +} diff --git a/typesense/src/client/stemming/mod.rs b/typesense/src/client/stemming/mod.rs new file mode 100644 index 0000000..997d27c --- /dev/null +++ b/typesense/src/client/stemming/mod.rs @@ -0,0 +1,37 @@ +//! Provides access to the API endpoints for managing stemming. +//! +//! An instance of `Stemming` is created via the `Client::stemming()` method. + +pub mod dictionaries; +pub mod dictionary; + +use super::Client; +use dictionaries::Dictionaries; +use dictionary::Dictionary; + +/// Provides methods for managing Typesense stemming. +/// +/// This struct is created by calling `client.stemming()`. +pub struct Stemming<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Stemming<'a> { + /// Creates a new `Stemming` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Provides access to endpoints for managing the collection of dictionaries. + pub fn dictionaries(&self) -> Dictionaries<'a> { + Dictionaries::new(self.client) + } + + /// Provides access to endpoints for managing a single dictionary. + /// + /// # Arguments + /// * `dictionary_id` - The ID of the dictionary to manage. 
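The split between `dictionaries()` for collection-level calls and `dictionary(id)` for a single dictionary mirrors the other namespaced accessors in this patch. A brief sketch of reading stemming data back through both handles, assuming the generated models derive `Debug`:

```rust
use typesense::client::Client;

// Reading stemming data back through the two accessors defined in this module.
async fn inspect_stemming(client: &Client) {
    // All dictionaries known to the cluster.
    if let Ok(all) = client.stemming().dictionaries().list().await {
        println!("dictionaries: {all:?}");
    }

    // A single dictionary, by ID.
    match client.stemming().dictionary("irregular-plurals").get().await {
        Ok(dictionary) => println!("dictionary: {dictionary:?}"),
        Err(e) => eprintln!("lookup failed: {e:?}"),
    }
}
```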
+ pub fn dictionary(&self, dictionary_id: &'a str) -> Dictionary<'a> { + Dictionary::new(self.client, dictionary_id) + } +} diff --git a/typesense/src/client/stopword.rs b/typesense/src/client/stopword.rs new file mode 100644 index 0000000..6917238 --- /dev/null +++ b/typesense/src/client/stopword.rs @@ -0,0 +1,57 @@ +//! Provides access to the API endpoints for managing a single stopwords set. +//! +//! An instance of `Stopword` is created via the `Client::stopword()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, stopwords_api}, + models, +}; + +/// Provides methods for interacting with a specific stopwords set. +/// +/// This struct is created by calling `client.stopword("set_id")`. +pub struct Stopword<'a> { + pub(super) client: &'a Client, + pub(super) set_id: &'a str, +} + +impl<'a> Stopword<'a> { + /// Creates a new `Stopword` instance for a specific set ID. + pub(super) fn new(client: &'a Client, set_id: &'a str) -> Self { + Self { client, set_id } + } + + /// Retrieves the details of this specific stopwords set. + pub async fn retrieve( + &self, + ) -> Result> + { + let params = stopwords_api::RetrieveStopwordsSetParams { + set_id: self.set_id.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { stopwords_api::retrieve_stopwords_set(&config, params_for_move).await } + }) + .await + } + + /// Permanently deletes this specific stopwords set. + pub async fn delete( + &self, + ) -> Result> + { + let params = stopwords_api::DeleteStopwordsSetParams { + set_id: self.set_id.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { stopwords_api::delete_stopwords_set(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/stopwords.rs b/typesense/src/client/stopwords.rs new file mode 100644 index 0000000..dbbb9b0 --- /dev/null +++ b/typesense/src/client/stopwords.rs @@ -0,0 +1,60 @@ +//! Provides access to the API endpoints for managing stopwords sets. +//! +//! A `Stopwords` instance is created via the main `Client::stopwords()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{configuration, stopwords_api}, + models, +}; + +/// Provides methods for managing Typesense stopwords sets. +/// +/// This struct is created by calling `client.stopwords()`. +pub struct Stopwords<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Stopwords<'a> { + /// Creates a new `Stopwords` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Creates or updates an existing stopwords set. + /// + /// # Arguments + /// * `set_id` - The ID of the stopwords set to create or update. + /// * `schema` - A `StopwordsSetUpsertSchema` object with the stopwords to upsert. + pub async fn upsert( + &self, + set_id: &str, + schema: models::StopwordsSetUpsertSchema, + ) -> Result> { + let params = stopwords_api::UpsertStopwordsSetParams { + set_id: set_id.to_string(), + stopwords_set_upsert_schema: schema, + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { stopwords_api::upsert_stopwords_set(&config, params_for_move).await } + }) + .await + } + + /// Retrieves the details of all stopwords sets. 
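A sketch of the round trip through both stopwords handles. The `StopwordsSetUpsertSchema` field layout used here (`stopwords` plus an optional `locale`) is assumed from the Typesense API rather than shown in this patch, so verify it against the generated model.

```rust
use typesense::client::Client;
use typesense::models::StopwordsSetUpsertSchema;

// Assumed field layout for the upsert schema; check the generated model.
async fn manage_stopwords(client: &Client) {
    let schema = StopwordsSetUpsertSchema {
        stopwords: vec!["a".to_owned(), "an".to_owned(), "the".to_owned()],
        locale: Some("en".to_owned()),
    };

    // Create or replace the set under a chosen ID.
    if let Err(e) = client.stopwords().upsert("common-english", schema).await {
        eprintln!("upsert failed: {e:?}");
        return;
    }

    // Read the set back, then delete it, via the single-set handle.
    if let Ok(set) = client.stopword("common-english").retrieve().await {
        println!("stopwords set: {set:?}");
    }
    let _ = client.stopword("common-english").delete().await;
}
```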
+ pub async fn retrieve( + &self, + ) -> Result< + models::StopwordsSetsRetrieveAllSchema, + Error, + > { + self.client + .execute(|config: Arc| async move { + stopwords_api::retrieve_stopwords_sets(&config).await + }) + .await + } +} diff --git a/typesense/src/collection_schema.rs b/typesense/src/collection_schema.rs index c28207a..0951e76 100644 --- a/typesense/src/collection_schema.rs +++ b/typesense/src/collection_schema.rs @@ -82,6 +82,7 @@ impl CollectionSchemaBuilder { token_separators: self.token_separators, enable_nested_fields: self.enable_nested_fields, symbols_to_index: self.symbols_to_index, + ..Default::default() } } } diff --git a/typesense/src/field/mod.rs b/typesense/src/field/mod.rs index 934abed..4cd98b6 100644 --- a/typesense/src/field/mod.rs +++ b/typesense/src/field/mod.rs @@ -105,6 +105,7 @@ impl FieldBuilder { num_dim: self.num_dim, drop: self.drop, embed: self.embed, + ..Default::default() } } } diff --git a/typesense/tests/client/mod.rs b/typesense/tests/client/mod.rs new file mode 100644 index 0000000..cf8192b --- /dev/null +++ b/typesense/tests/client/mod.rs @@ -0,0 +1,289 @@ +use reqwest::Url; +use reqwest_retry::policies::ExponentialBackoff; +use std::time::Duration; +use typesense::client::*; +use typesense::models::CollectionResponse; +use wiremock::matchers::{header, method, path}; +use wiremock::{Mock, MockServer, ResponseTemplate}; + +// Helper to create a mock Typesense server for a successful collection retrieval. +async fn setup_mock_server_ok(server: &MockServer, collection_name: &str) { + let response_body = CollectionResponse { + name: collection_name.to_string(), + ..Default::default() + }; + + Mock::given(method("GET")) + .and(path(format!("/collections/{}", collection_name))) + .and(header("X-TYPESENSE-API-KEY", "test-key")) + .respond_with(ResponseTemplate::new(200).set_body_json(response_body)) + .mount(server) + .await; +} + +// Helper to create a mock Typesense server that returns a server error. +async fn setup_mock_server_503(server: &MockServer, collection_name: &str) { + Mock::given(method("GET")) + .and(path(format!("/collections/{}", collection_name))) + .respond_with(ResponseTemplate::new(503)) + .mount(server) + .await; +} + +// Helper to create a mock Typesense server that returns a 404 Not Found error. +async fn setup_mock_server_404(server: &MockServer, collection_name: &str) { + Mock::given(method("GET")) + .and(path(format!("/collections/{}", collection_name))) + .respond_with(ResponseTemplate::new(404)) + .mount(server) + .await; +} + +// Helper function to create a client configuration for tests. +fn get_test_config(nodes: Vec, nearest_node: Option) -> MultiNodeConfiguration { + MultiNodeConfiguration { + nodes, + nearest_node, + api_key: "test-key".to_string(), + healthcheck_interval: Duration::from_secs(60), + retry_policy: ExponentialBackoff::builder().build_with_max_retries(0), + connection_timeout: Duration::from_secs(1), + } +} + +#[tokio::test] +async fn test_success_on_first_node() { + let server1 = MockServer::start().await; + setup_mock_server_ok(&server1, "products").await; + + let config = get_test_config(vec![Url::parse(&server1.uri()).unwrap()], None); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + + assert!(result.is_ok()); + assert_eq!(result.unwrap().name, "products"); + // Check that the server received exactly one request. 
+ assert_eq!(server1.received_requests().await.unwrap().len(), 1); +} + +#[tokio::test] +async fn test_failover_to_second_node() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + setup_mock_server_503(&server1, "products").await; + setup_mock_server_ok(&server2, "products").await; + + let config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + ], + None, + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_ok()); + + // The first server should have been tried and failed. + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + // The second server should have been tried and succeeded. + assert_eq!(server2.received_requests().await.unwrap().len(), 1); +} + +#[tokio::test] +async fn test_nearest_node_is_prioritized() { + let nearest_server = MockServer::start().await; + let regular_server = MockServer::start().await; + setup_mock_server_ok(&nearest_server, "products").await; + setup_mock_server_ok(®ular_server, "products").await; + + let config = get_test_config( + vec![Url::parse(®ular_server.uri()).unwrap()], + Some(Url::parse(&nearest_server.uri()).unwrap()), + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_ok()); + + // Only the nearest node should have received a request. + assert_eq!(nearest_server.received_requests().await.unwrap().len(), 1); + assert_eq!(regular_server.received_requests().await.unwrap().len(), 0); +} + +#[tokio::test] +async fn test_failover_from_nearest_to_regular_node() { + let nearest_server = MockServer::start().await; + let regular_server = MockServer::start().await; + setup_mock_server_503(&nearest_server, "products").await; + setup_mock_server_ok(®ular_server, "products").await; + + let config = get_test_config( + vec![Url::parse(®ular_server.uri()).unwrap()], + Some(Url::parse(&nearest_server.uri()).unwrap()), + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_ok()); + + // Nearest node should have failed. + assert_eq!(nearest_server.received_requests().await.unwrap().len(), 1); + // Regular node should have succeeded. + assert_eq!(regular_server.received_requests().await.unwrap().len(), 1); +} + +#[tokio::test] +async fn test_round_robin_failover() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + let server3 = MockServer::start().await; + setup_mock_server_503(&server1, "products").await; + setup_mock_server_503(&server2, "products").await; + setup_mock_server_ok(&server3, "products").await; + + let config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + Url::parse(&server3.uri()).unwrap(), + ], + None, + ); + let client = Client::new(config).unwrap(); + + // First request should fail over to the third node + let result = client.collection("products").retrieve().await; + assert!(result.is_ok()); + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + assert_eq!(server2.received_requests().await.unwrap().len(), 1); + assert_eq!(server3.received_requests().await.unwrap().len(), 1); + + // The next request should start from the now-healthy 3rd node, but round-robin + // logic will have advanced the internal counter. Let's see it wrap around. 
+ // We expect the next attempt to be on server 3 again, then 1 (if 3 fails). + + // Reset server 3 to also fail + server3.reset().await; + setup_mock_server_503(&server3, "products").await; + // Make server 1 healthy again + server1.reset().await; + setup_mock_server_ok(&server1, "products").await; + + let result2 = client.collection("products").retrieve().await; + assert!(result2.is_ok()); + + // Server 3 was tried first and failed. + assert_eq!(server3.received_requests().await.unwrap().len(), 1); + // Server 1 was tried next and succeeded. + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + // Server 2 was not touched this time. + assert_eq!(server2.received_requests().await.unwrap().len(), 1); // Remains 1 from first call +} + +#[tokio::test] +async fn test_health_check_and_node_recovery() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + + setup_mock_server_503(&server1, "products").await; + setup_mock_server_ok(&server2, "products").await; + + let mut config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + ], + None, + ); + // Use a very short healthcheck interval for the test + config.healthcheck_interval = Duration::from_millis(500); + let client = Client::new(config).unwrap(); + + // 1. First request fails over to server2, marking server1 as unhealthy. + assert!(client.collection("products").retrieve().await.is_ok()); + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + assert_eq!(server2.received_requests().await.unwrap().len(), 1); + + // 2. Immediate second request should go directly to server2. + assert!(client.collection("products").retrieve().await.is_ok()); + assert_eq!(server1.received_requests().await.unwrap().len(), 1); // No new request + assert_eq!(server2.received_requests().await.unwrap().len(), 2); // Got another request + + // 3. Wait for the healthcheck interval to pass. + tokio::time::sleep(Duration::from_millis(600)).await; + + // 4. Make server1 healthy again. + server1.reset().await; + setup_mock_server_ok(&server1, "products").await; + + // 5. The next request should try server1 again (due to healthcheck expiry) and succeed. + assert!(client.collection("products").retrieve().await.is_ok()); + assert_eq!(server1.received_requests().await.unwrap().len(), 1); // Server 1 received its first successful req + assert_eq!(server2.received_requests().await.unwrap().len(), 2); // No new request for server 2 +} + +#[tokio::test] +async fn test_all_nodes_fail() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + setup_mock_server_503(&server1, "products").await; + setup_mock_server_503(&server2, "products").await; + + let config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + ], + None, + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_err()); + + match result.err().unwrap() { + Error::AllNodesFailed(_) => { /* This is the expected outcome */ } + _ => panic!("Expected AllNodesFailed error"), + } + + // Both servers should have been tried. 
+ assert_eq!(server1.received_requests().await.unwrap().len(), 1); + assert_eq!(server2.received_requests().await.unwrap().len(), 1); +} + +#[tokio::test] +async fn test_fail_fast_on_non_retriable_error() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + + setup_mock_server_404(&server1, "products").await; + setup_mock_server_ok(&server2, "products").await; + + let config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + ], + None, + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_err()); + + // Check that the error is the non-retriable API error. + match result.err().unwrap() { + Error::Api(typesense_codegen::apis::Error::ResponseError(content)) => { + assert_eq!(content.status, reqwest::StatusCode::NOT_FOUND); + } + e => panic!("Expected an API error, but got {:?}", e), + } + + // The first server should have been tried. + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + // The second server should NOT have been tried. + assert_eq!(server2.received_requests().await.unwrap().len(), 0); +} diff --git a/typesense_codegen/.openapi-generator/FILES b/typesense_codegen/.openapi-generator/FILES index 6df3cf7..58b2ca8 100644 --- a/typesense_codegen/.openapi-generator/FILES +++ b/typesense_codegen/.openapi-generator/FILES @@ -33,12 +33,14 @@ docs/CurationApi.md docs/Debug200Response.md docs/DebugApi.md docs/DeleteDocuments200Response.md +docs/DeleteDocumentsParameters.md docs/DeleteStopwordsSet200Response.md docs/DirtyValues.md docs/DocumentIndexParameters.md docs/DocumentsApi.md docs/DropTokensMode.md docs/ErrorResponse.md +docs/ExportDocumentsParameters.md docs/FacetCounts.md docs/FacetCountsCountsInner.md docs/FacetCountsStats.md @@ -47,6 +49,7 @@ docs/FieldEmbed.md docs/FieldEmbedModelConfig.md docs/HealthApi.md docs/HealthStatus.md +docs/ImportDocumentsParameters.md docs/IndexAction.md docs/KeysApi.md docs/ListStemmingDictionaries200Response.md @@ -55,6 +58,11 @@ docs/MultiSearchParameters.md docs/MultiSearchResult.md docs/MultiSearchResultItem.md docs/MultiSearchSearchesParameter.md +docs/NlSearchModelBase.md +docs/NlSearchModelCreateSchema.md +docs/NlSearchModelDeleteSchema.md +docs/NlSearchModelSchema.md +docs/NlSearchModelsApi.md docs/OperationsApi.md docs/OverrideApi.md docs/PresetDeleteSchema.md @@ -97,6 +105,7 @@ docs/StopwordsSetsRetrieveAllSchema.md docs/SuccessStatus.md docs/SynonymsApi.md docs/UpdateDocuments200Response.md +docs/UpdateDocumentsParameters.md docs/VoiceQueryModelCollectionConfig.md git_push.sh src/apis/analytics_api.rs @@ -109,6 +118,7 @@ src/apis/documents_api.rs src/apis/health_api.rs src/apis/keys_api.rs src/apis/mod.rs +src/apis/nl_search_models_api.rs src/apis/operations_api.rs src/apis/override_api.rs src/apis/presets_api.rs @@ -143,11 +153,13 @@ src/models/conversation_model_schema.rs src/models/conversation_model_update_schema.rs src/models/debug_200_response.rs src/models/delete_documents_200_response.rs +src/models/delete_documents_parameters.rs src/models/delete_stopwords_set_200_response.rs src/models/dirty_values.rs src/models/document_index_parameters.rs src/models/drop_tokens_mode.rs src/models/error_response.rs +src/models/export_documents_parameters.rs src/models/facet_counts.rs src/models/facet_counts_counts_inner.rs src/models/facet_counts_stats.rs @@ -155,6 +167,7 @@ src/models/field.rs src/models/field_embed.rs src/models/field_embed_model_config.rs 
src/models/health_status.rs +src/models/import_documents_parameters.rs src/models/index_action.rs src/models/list_stemming_dictionaries_200_response.rs src/models/mod.rs @@ -163,6 +176,10 @@ src/models/multi_search_parameters.rs src/models/multi_search_result.rs src/models/multi_search_result_item.rs src/models/multi_search_searches_parameter.rs +src/models/nl_search_model_base.rs +src/models/nl_search_model_create_schema.rs +src/models/nl_search_model_delete_schema.rs +src/models/nl_search_model_schema.rs src/models/preset_delete_schema.rs src/models/preset_schema.rs src/models/preset_upsert_schema.rs @@ -199,4 +216,5 @@ src/models/stopwords_set_upsert_schema.rs src/models/stopwords_sets_retrieve_all_schema.rs src/models/success_status.rs src/models/update_documents_200_response.rs +src/models/update_documents_parameters.rs src/models/voice_query_model_collection_config.rs diff --git a/typesense_codegen/Cargo.toml b/typesense_codegen/Cargo.toml index e899bf1..8984424 100644 --- a/typesense_codegen/Cargo.toml +++ b/typesense_codegen/Cargo.toml @@ -3,16 +3,17 @@ name = "typesense_codegen" version = "0.25.0" authors = ["OpenAPI Generator team and contributors"] description = "client for typesense generated with openapi spec" -edition = "2018" +edition = "2021" license = "Apache-2.0" [dependencies] -serde = "^1.0" -serde_derive = "^1.0" +serde = { version = "^1.0", features = ["derive"] } serde_json = "^1.0" -url = "^2.2" -uuid = { version = "^1.0", features = ["serde", "v4", "js"] } - -[dependencies.reqwest] -version = "^0.12" -features = ["json", "multipart"] +serde_repr = "^0.1" +url = "^2.5" +reqwest = { version = "^0.12", default-features = false, features = ["json", "multipart"] } +reqwest-middleware = { version = "^0.4", features = ["json", "multipart"] } +[features] +default = ["native-tls"] +native-tls = ["reqwest/native-tls"] +rustls = ["reqwest/rustls-tls"] diff --git a/typesense_codegen/README.md b/typesense_codegen/README.md index ddffbb8..160a217 100644 --- a/typesense_codegen/README.md +++ b/typesense_codegen/README.md @@ -69,6 +69,11 @@ Class | Method | HTTP request | Description *KeysApi* | [**delete_key**](docs/KeysApi.md#delete_key) | **DELETE** /keys/{keyId} | Delete an API key given its ID. *KeysApi* | [**get_key**](docs/KeysApi.md#get_key) | **GET** /keys/{keyId} | Retrieve (metadata about) a key *KeysApi* | [**get_keys**](docs/KeysApi.md#get_keys) | **GET** /keys | Retrieve (metadata about) all keys. 
+*NlSearchModelsApi* | [**create_nl_search_model**](docs/NlSearchModelsApi.md#create_nl_search_model) | **POST** /nl_search_models | Create a NL search model +*NlSearchModelsApi* | [**delete_nl_search_model**](docs/NlSearchModelsApi.md#delete_nl_search_model) | **DELETE** /nl_search_models/{modelId} | Delete a NL search model +*NlSearchModelsApi* | [**retrieve_all_nl_search_models**](docs/NlSearchModelsApi.md#retrieve_all_nl_search_models) | **GET** /nl_search_models | List all NL search models +*NlSearchModelsApi* | [**retrieve_nl_search_model**](docs/NlSearchModelsApi.md#retrieve_nl_search_model) | **GET** /nl_search_models/{modelId} | Retrieve a NL search model +*NlSearchModelsApi* | [**update_nl_search_model**](docs/NlSearchModelsApi.md#update_nl_search_model) | **PUT** /nl_search_models/{modelId} | Update a NL search model *OperationsApi* | [**get_schema_changes**](docs/OperationsApi.md#get_schema_changes) | **GET** /operations/schema_changes | Get the status of in-progress schema change operations *OperationsApi* | [**retrieve_api_stats**](docs/OperationsApi.md#retrieve_api_stats) | **GET** /stats.json | Get stats about API endpoints. *OperationsApi* | [**retrieve_metrics**](docs/OperationsApi.md#retrieve_metrics) | **GET** /metrics.json | Get current RAM, CPU, Disk & Network usage metrics. @@ -121,11 +126,13 @@ Class | Method | HTTP request | Description - [ConversationModelUpdateSchema](docs/ConversationModelUpdateSchema.md) - [Debug200Response](docs/Debug200Response.md) - [DeleteDocuments200Response](docs/DeleteDocuments200Response.md) + - [DeleteDocumentsParameters](docs/DeleteDocumentsParameters.md) - [DeleteStopwordsSet200Response](docs/DeleteStopwordsSet200Response.md) - [DirtyValues](docs/DirtyValues.md) - [DocumentIndexParameters](docs/DocumentIndexParameters.md) - [DropTokensMode](docs/DropTokensMode.md) - [ErrorResponse](docs/ErrorResponse.md) + - [ExportDocumentsParameters](docs/ExportDocumentsParameters.md) - [FacetCounts](docs/FacetCounts.md) - [FacetCountsCountsInner](docs/FacetCountsCountsInner.md) - [FacetCountsStats](docs/FacetCountsStats.md) @@ -133,6 +140,7 @@ Class | Method | HTTP request | Description - [FieldEmbed](docs/FieldEmbed.md) - [FieldEmbedModelConfig](docs/FieldEmbedModelConfig.md) - [HealthStatus](docs/HealthStatus.md) + - [ImportDocumentsParameters](docs/ImportDocumentsParameters.md) - [IndexAction](docs/IndexAction.md) - [ListStemmingDictionaries200Response](docs/ListStemmingDictionaries200Response.md) - [MultiSearchCollectionParameters](docs/MultiSearchCollectionParameters.md) @@ -140,6 +148,10 @@ Class | Method | HTTP request | Description - [MultiSearchResult](docs/MultiSearchResult.md) - [MultiSearchResultItem](docs/MultiSearchResultItem.md) - [MultiSearchSearchesParameter](docs/MultiSearchSearchesParameter.md) + - [NlSearchModelBase](docs/NlSearchModelBase.md) + - [NlSearchModelCreateSchema](docs/NlSearchModelCreateSchema.md) + - [NlSearchModelDeleteSchema](docs/NlSearchModelDeleteSchema.md) + - [NlSearchModelSchema](docs/NlSearchModelSchema.md) - [PresetDeleteSchema](docs/PresetDeleteSchema.md) - [PresetSchema](docs/PresetSchema.md) - [PresetUpsertSchema](docs/PresetUpsertSchema.md) @@ -176,6 +188,7 @@ Class | Method | HTTP request | Description - [StopwordsSetsRetrieveAllSchema](docs/StopwordsSetsRetrieveAllSchema.md) - [SuccessStatus](docs/SuccessStatus.md) - [UpdateDocuments200Response](docs/UpdateDocuments200Response.md) + - [UpdateDocumentsParameters](docs/UpdateDocumentsParameters.md) - 
[VoiceQueryModelCollectionConfig](docs/VoiceQueryModelCollectionConfig.md) diff --git a/typesense_codegen/docs/AnalyticsEventCreateSchema.md b/typesense_codegen/docs/AnalyticsEventCreateSchema.md index ddb569b..97c07cc 100644 --- a/typesense_codegen/docs/AnalyticsEventCreateSchema.md +++ b/typesense_codegen/docs/AnalyticsEventCreateSchema.md @@ -4,9 +4,9 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**data** | [**serde_json::Value**](.md) | | -**name** | **String** | | **r#type** | **String** | | +**name** | **String** | | +**data** | [**serde_json::Value**](.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/AnalyticsRuleParameters.md b/typesense_codegen/docs/AnalyticsRuleParameters.md index 48d8f28..9be9eeb 100644 --- a/typesense_codegen/docs/AnalyticsRuleParameters.md +++ b/typesense_codegen/docs/AnalyticsRuleParameters.md @@ -4,10 +4,10 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**source** | [**models::AnalyticsRuleParametersSource**](AnalyticsRuleParametersSource.md) | | **destination** | [**models::AnalyticsRuleParametersDestination**](AnalyticsRuleParametersDestination.md) | | -**expand_query** | Option<**bool**> | | [optional] **limit** | Option<**i32**> | | [optional] -**source** | [**models::AnalyticsRuleParametersSource**](AnalyticsRuleParametersSource.md) | | +**expand_query** | Option<**bool**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/AnalyticsRuleParametersSourceEventsInner.md b/typesense_codegen/docs/AnalyticsRuleParametersSourceEventsInner.md index 0abc726..31264d2 100644 --- a/typesense_codegen/docs/AnalyticsRuleParametersSourceEventsInner.md +++ b/typesense_codegen/docs/AnalyticsRuleParametersSourceEventsInner.md @@ -4,9 +4,9 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**name** | **String** | | **r#type** | **String** | | **weight** | **f32** | | +**name** | **String** | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/AnalyticsRuleSchema.md b/typesense_codegen/docs/AnalyticsRuleSchema.md index 850d498..4afdcd4 100644 --- a/typesense_codegen/docs/AnalyticsRuleSchema.md +++ b/typesense_codegen/docs/AnalyticsRuleSchema.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**params** | [**models::AnalyticsRuleParameters**](AnalyticsRuleParameters.md) | | **r#type** | **String** | | +**params** | [**models::AnalyticsRuleParameters**](AnalyticsRuleParameters.md) | | **name** | **String** | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/AnalyticsRuleUpsertSchema.md b/typesense_codegen/docs/AnalyticsRuleUpsertSchema.md index 83348b2..fa21e43 100644 --- a/typesense_codegen/docs/AnalyticsRuleUpsertSchema.md +++ b/typesense_codegen/docs/AnalyticsRuleUpsertSchema.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- 
| ------------- | ------------- -**params** | [**models::AnalyticsRuleParameters**](AnalyticsRuleParameters.md) | | **r#type** | **String** | | +**params** | [**models::AnalyticsRuleParameters**](AnalyticsRuleParameters.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ApiKey.md b/typesense_codegen/docs/ApiKey.md index 73253db..94bb502 100644 --- a/typesense_codegen/docs/ApiKey.md +++ b/typesense_codegen/docs/ApiKey.md @@ -4,11 +4,11 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**value** | Option<**String**> | | [optional] +**description** | **String** | | **actions** | **Vec** | | **collections** | **Vec** | | -**description** | **String** | | **expires_at** | Option<**i64**> | | [optional] -**value** | Option<**String**> | | [optional] **id** | Option<**i64**> | | [optional][readonly] **value_prefix** | Option<**String**> | | [optional][readonly] diff --git a/typesense_codegen/docs/ApiKeySchema.md b/typesense_codegen/docs/ApiKeySchema.md index 0d39285..761d240 100644 --- a/typesense_codegen/docs/ApiKeySchema.md +++ b/typesense_codegen/docs/ApiKeySchema.md @@ -4,11 +4,11 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**value** | Option<**String**> | | [optional] +**description** | **String** | | **actions** | **Vec** | | **collections** | **Vec** | | -**description** | **String** | | **expires_at** | Option<**i64**> | | [optional] -**value** | Option<**String**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/CollectionAlias.md b/typesense_codegen/docs/CollectionAlias.md index 87306df..5831360 100644 --- a/typesense_codegen/docs/CollectionAlias.md +++ b/typesense_codegen/docs/CollectionAlias.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**collection_name** | **String** | Name of the collection the alias mapped to | **name** | **String** | Name of the collection alias | [readonly] +**collection_name** | **String** | Name of the collection the alias mapped to | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/CollectionResponse.md b/typesense_codegen/docs/CollectionResponse.md index 47b703b..f67f018 100644 --- a/typesense_codegen/docs/CollectionResponse.md +++ b/typesense_codegen/docs/CollectionResponse.md @@ -4,15 +4,15 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**name** | **String** | Name of the collection | +**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | **default_sorting_field** | Option<**String**> | The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. | [optional][default to ] +**token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. 
| [optional][default to []] **enable_nested_fields** | Option<**bool**> | Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. | [optional][default to false] -**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | -**name** | **String** | Name of the collection | **symbols_to_index** | Option<**Vec**> | List of symbols or special characters to be indexed. | [optional][default to []] -**token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. | [optional][default to []] **voice_query_model** | Option<[**models::VoiceQueryModelCollectionConfig**](VoiceQueryModelCollectionConfig.md)> | | [optional] -**created_at** | **i64** | Timestamp of when the collection was created (Unix epoch in seconds) | [readonly] **num_documents** | **i64** | Number of documents in the collection | [readonly] +**created_at** | **i64** | Timestamp of when the collection was created (Unix epoch in seconds) | [readonly] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/CollectionSchema.md b/typesense_codegen/docs/CollectionSchema.md index 9b7ae4a..00d94cc 100644 --- a/typesense_codegen/docs/CollectionSchema.md +++ b/typesense_codegen/docs/CollectionSchema.md @@ -4,12 +4,12 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**name** | **String** | Name of the collection | +**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | **default_sorting_field** | Option<**String**> | The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. | [optional][default to ] +**token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. | [optional][default to []] **enable_nested_fields** | Option<**bool**> | Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. | [optional][default to false] -**fields** | [**Vec**](Field.md) | A list of fields for querying, filtering and faceting | -**name** | **String** | Name of the collection | **symbols_to_index** | Option<**Vec**> | List of symbols or special characters to be indexed. | [optional][default to []] -**token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. 
| [optional][default to []] **voice_query_model** | Option<[**models::VoiceQueryModelCollectionConfig**](VoiceQueryModelCollectionConfig.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ConversationModelCreateSchema.md b/typesense_codegen/docs/ConversationModelCreateSchema.md index a070654..a8d70c5 100644 --- a/typesense_codegen/docs/ConversationModelCreateSchema.md +++ b/typesense_codegen/docs/ConversationModelCreateSchema.md @@ -4,14 +4,14 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**account_id** | Option<**String**> | LLM service's account ID (only applicable for Cloudflare) | [optional] -**api_key** | Option<**String**> | The LLM service's API Key | [optional] -**history_collection** | **String** | Typesense collection that stores the historical conversations | **id** | Option<**String**> | An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. | [optional] -**max_bytes** | **i32** | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. | **model_name** | **String** | Name of the LLM model offered by OpenAI, Cloudflare or vLLM | +**api_key** | Option<**String**> | The LLM service's API Key | [optional] +**history_collection** | **String** | Typesense collection that stores the historical conversations | +**account_id** | Option<**String**> | LLM service's account ID (only applicable for Cloudflare) | [optional] **system_prompt** | Option<**String**> | The system prompt that contains special instructions to the LLM | [optional] **ttl** | Option<**i32**> | Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) | [optional] +**max_bytes** | **i32** | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. | **vllm_url** | Option<**String**> | URL of vLLM service | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ConversationModelSchema.md b/typesense_codegen/docs/ConversationModelSchema.md index 74dfa4d..f9fe0d2 100644 --- a/typesense_codegen/docs/ConversationModelSchema.md +++ b/typesense_codegen/docs/ConversationModelSchema.md @@ -4,14 +4,14 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**account_id** | Option<**String**> | LLM service's account ID (only applicable for Cloudflare) | [optional] -**api_key** | Option<**String**> | The LLM service's API Key | [optional] -**history_collection** | **String** | Typesense collection that stores the historical conversations | **id** | **String** | An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. | -**max_bytes** | **i32** | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. 
| **model_name** | **String** | Name of the LLM model offered by OpenAI, Cloudflare or vLLM | +**api_key** | Option<**String**> | The LLM service's API Key | [optional] +**history_collection** | **String** | Typesense collection that stores the historical conversations | +**account_id** | Option<**String**> | LLM service's account ID (only applicable for Cloudflare) | [optional] **system_prompt** | Option<**String**> | The system prompt that contains special instructions to the LLM | [optional] **ttl** | Option<**i32**> | Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) | [optional] +**max_bytes** | **i32** | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. | **vllm_url** | Option<**String**> | URL of vLLM service | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ConversationModelUpdateSchema.md b/typesense_codegen/docs/ConversationModelUpdateSchema.md index e12c165..a9471ce 100644 --- a/typesense_codegen/docs/ConversationModelUpdateSchema.md +++ b/typesense_codegen/docs/ConversationModelUpdateSchema.md @@ -4,14 +4,14 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**account_id** | Option<**String**> | LLM service's account ID (only applicable for Cloudflare) | [optional] -**api_key** | Option<**String**> | The LLM service's API Key | [optional] -**history_collection** | Option<**String**> | Typesense collection that stores the historical conversations | [optional] **id** | Option<**String**> | An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. | [optional] -**max_bytes** | Option<**i32**> | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. | [optional] **model_name** | Option<**String**> | Name of the LLM model offered by OpenAI, Cloudflare or vLLM | [optional] +**api_key** | Option<**String**> | The LLM service's API Key | [optional] +**history_collection** | Option<**String**> | Typesense collection that stores the historical conversations | [optional] +**account_id** | Option<**String**> | LLM service's account ID (only applicable for Cloudflare) | [optional] **system_prompt** | Option<**String**> | The system prompt that contains special instructions to the LLM | [optional] **ttl** | Option<**i32**> | Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) | [optional] +**max_bytes** | Option<**i32**> | The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. 
| [optional] **vllm_url** | Option<**String**> | URL of vLLM service | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/DeleteDocumentsDeleteDocumentsParametersParameter.md b/typesense_codegen/docs/DeleteDocumentsDeleteDocumentsParametersParameter.md new file mode 100644 index 0000000..ca66c4a --- /dev/null +++ b/typesense_codegen/docs/DeleteDocumentsDeleteDocumentsParametersParameter.md @@ -0,0 +1,14 @@ +# DeleteDocumentsDeleteDocumentsParametersParameter + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**filter_by** | **String** | | +**batch_size** | Option<**i32**> | Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. | [optional] +**ignore_not_found** | Option<**bool**> | | [optional] +**truncate** | Option<**bool**> | When true, removes all documents from the collection while preserving the collection and its schema. | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/DeleteDocumentsParameters.md b/typesense_codegen/docs/DeleteDocumentsParameters.md new file mode 100644 index 0000000..f960189 --- /dev/null +++ b/typesense_codegen/docs/DeleteDocumentsParameters.md @@ -0,0 +1,14 @@ +# DeleteDocumentsParameters + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**filter_by** | **String** | | +**batch_size** | Option<**i32**> | Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. | [optional] +**ignore_not_found** | Option<**bool**> | | [optional] +**truncate** | Option<**bool**> | When true, removes all documents from the collection while preserving the collection and its schema. | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/DocumentsApi.md b/typesense_codegen/docs/DocumentsApi.md index a3e79aa..696c752 100644 --- a/typesense_codegen/docs/DocumentsApi.md +++ b/typesense_codegen/docs/DocumentsApi.md @@ -54,7 +54,7 @@ Name | Type | Description | Required | Notes ## delete_documents -> models::DeleteDocuments200Response delete_documents(collection_name, batch_size, filter_by, ignore_not_found, truncate) +> models::DeleteDocuments200Response delete_documents(collection_name, filter_by, batch_size, ignore_not_found, truncate) Delete a bunch of documents Delete a bunch of documents that match a specific filter condition. Use the `batch_size` parameter to control the number of documents that should deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. @@ -65,8 +65,8 @@ Delete a bunch of documents that match a specific filter condition. 
Use the `bat Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection to delete documents from | [required] | -**batch_size** | Option<**i32**> | | | **filter_by** | Option<**String**> | | | +**batch_size** | Option<**i32**> | | | **ignore_not_found** | Option<**bool**> | | | **truncate** | Option<**bool**> | | | @@ -117,7 +117,7 @@ Name | Type | Description | Required | Notes ## export_documents -> String export_documents(collection_name, exclude_fields, filter_by, include_fields) +> String export_documents(collection_name, filter_by, include_fields, exclude_fields) Export all documents in a collection Export all documents in a collection in JSON lines format. @@ -128,9 +128,9 @@ Export all documents in a collection in JSON lines format. Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection | [required] | -**exclude_fields** | Option<**String**> | | | **filter_by** | Option<**String**> | | | **include_fields** | Option<**String**> | | | +**exclude_fields** | Option<**String**> | | | ### Return type @@ -240,7 +240,7 @@ Name | Type | Description | Required | Notes ## import_documents -> String import_documents(collection_name, body, action, batch_size, dirty_values, remote_embedding_batch_size, return_doc, return_id) +> String import_documents(collection_name, body, batch_size, return_id, remote_embedding_batch_size, return_doc, action, dirty_values) Import documents into a collection The documents to be imported must be formatted in a newline delimited JSON structure. You can feed the output file from a Typesense export operation directly as import. 
@@ -252,12 +252,12 @@ Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection | [required] | **body** | **String** | The json array of documents or the JSONL file to import | [required] | -**action** | Option<[**IndexAction**](.md)> | | | **batch_size** | Option<**i32**> | | | -**dirty_values** | Option<[**DirtyValues**](.md)> | | | +**return_id** | Option<**bool**> | | | **remote_embedding_batch_size** | Option<**i32**> | | | **return_doc** | Option<**bool**> | | | -**return_id** | Option<**bool**> | | | +**action** | Option<[**IndexAction**](.md)> | | | +**dirty_values** | Option<[**DirtyValues**](.md)> | | | ### Return type @@ -310,7 +310,7 @@ Name | Type | Description | Required | Notes ## multi_search -> models::MultiSearchResult multi_search(cache_ttl, conversation, conversation_id, conversation_model_id, drop_tokens_mode, drop_tokens_threshold, enable_highlight_v1, enable_overrides, enable_synonyms, enable_typos_for_alpha_numerical_tokens, enable_typos_for_numerical_tokens, exclude_fields, exhaustive_search, facet_by, facet_query, facet_return_parent, facet_strategy, filter_by, filter_curated_hits, group_by, group_limit, group_missing_values, hidden_hits, highlight_affix_num_tokens, highlight_end_tag, highlight_fields, highlight_full_fields, highlight_start_tag, include_fields, infix, limit, max_candidates, max_extra_prefix, max_extra_suffix, max_facet_values, max_filter_by_candidates, min_len_1typo, min_len_2typo, num_typos, offset, override_tags, page, per_page, pinned_hits, pre_segmented_query, prefix, preset, prioritize_exact_match, prioritize_num_matching_fields, prioritize_token_position, q, query_by, query_by_weights, remote_embedding_num_tries, remote_embedding_timeout_ms, search_cutoff_ms, snippet_threshold, sort_by, split_join_tokens, stopwords, synonym_num_typos, synonym_prefix, text_match_type, typo_tokens_threshold, use_cache, vector_query, voice_query, multi_search_searches_parameter) +> models::MultiSearchResult multi_search(q, query_by, query_by_weights, text_match_type, prefix, infix, max_extra_prefix, max_extra_suffix, filter_by, sort_by, facet_by, max_facet_values, facet_query, num_typos, page, per_page, limit, offset, group_by, group_limit, group_missing_values, include_fields, exclude_fields, highlight_full_fields, highlight_affix_num_tokens, highlight_start_tag, highlight_end_tag, snippet_threshold, drop_tokens_threshold, drop_tokens_mode, typo_tokens_threshold, enable_typos_for_alpha_numerical_tokens, filter_curated_hits, enable_synonyms, synonym_prefix, synonym_num_typos, pinned_hits, hidden_hits, override_tags, highlight_fields, pre_segmented_query, preset, enable_overrides, prioritize_exact_match, prioritize_token_position, prioritize_num_matching_fields, enable_typos_for_numerical_tokens, exhaustive_search, search_cutoff_ms, use_cache, cache_ttl, min_len_1typo, min_len_2typo, vector_query, remote_embedding_timeout_ms, remote_embedding_num_tries, facet_strategy, stopwords, facet_return_parent, voice_query, conversation, conversation_model_id, conversation_id, multi_search_searches_parameter) send multiple search requests in a single HTTP request This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. 
@@ -320,73 +320,69 @@ This is especially useful to avoid round-trip network latencies incurred otherwi Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**cache_ttl** | Option<**i32**> | | | -**conversation** | Option<**bool**> | | | -**conversation_id** | Option<**String**> | | | -**conversation_model_id** | Option<**String**> | | | -**drop_tokens_mode** | Option<[**DropTokensMode**](.md)> | | | -**drop_tokens_threshold** | Option<**i32**> | | | -**enable_highlight_v1** | Option<**bool**> | | | -**enable_overrides** | Option<**bool**> | | | -**enable_synonyms** | Option<**bool**> | | | -**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | | | -**enable_typos_for_numerical_tokens** | Option<**bool**> | | | -**exclude_fields** | Option<**String**> | | | -**exhaustive_search** | Option<**bool**> | | | -**facet_by** | Option<**String**> | | | -**facet_query** | Option<**String**> | | | -**facet_return_parent** | Option<**String**> | | | -**facet_strategy** | Option<**String**> | | | -**filter_by** | Option<**String**> | | | -**filter_curated_hits** | Option<**bool**> | | | -**group_by** | Option<**String**> | | | -**group_limit** | Option<**i32**> | | | -**group_missing_values** | Option<**bool**> | | | -**hidden_hits** | Option<**String**> | | | -**highlight_affix_num_tokens** | Option<**i32**> | | | -**highlight_end_tag** | Option<**String**> | | | -**highlight_fields** | Option<**String**> | | | -**highlight_full_fields** | Option<**String**> | | | -**highlight_start_tag** | Option<**String**> | | | -**include_fields** | Option<**String**> | | | +**q** | Option<**String**> | | | +**query_by** | Option<**String**> | | | +**query_by_weights** | Option<**String**> | | | +**text_match_type** | Option<**String**> | | | +**prefix** | Option<**String**> | | | **infix** | Option<**String**> | | | -**limit** | Option<**i32**> | | | -**max_candidates** | Option<**i32**> | | | **max_extra_prefix** | Option<**i32**> | | | **max_extra_suffix** | Option<**i32**> | | | +**filter_by** | Option<**String**> | | | +**sort_by** | Option<**String**> | | | +**facet_by** | Option<**String**> | | | **max_facet_values** | Option<**i32**> | | | -**max_filter_by_candidates** | Option<**i32**> | | | -**min_len_1typo** | Option<**i32**> | | | -**min_len_2typo** | Option<**i32**> | | | +**facet_query** | Option<**String**> | | | **num_typos** | Option<**String**> | | | -**offset** | Option<**i32**> | | | -**override_tags** | Option<**String**> | | | **page** | Option<**i32**> | | | **per_page** | Option<**i32**> | | | +**limit** | Option<**i32**> | | | +**offset** | Option<**i32**> | | | +**group_by** | Option<**String**> | | | +**group_limit** | Option<**i32**> | | | +**group_missing_values** | Option<**bool**> | | | +**include_fields** | Option<**String**> | | | +**exclude_fields** | Option<**String**> | | | +**highlight_full_fields** | Option<**String**> | | | +**highlight_affix_num_tokens** | Option<**i32**> | | | +**highlight_start_tag** | Option<**String**> | | | +**highlight_end_tag** | Option<**String**> | | | +**snippet_threshold** | Option<**i32**> | | | +**drop_tokens_threshold** | Option<**i32**> | | | +**drop_tokens_mode** | Option<[**DropTokensMode**](.md)> | | | +**typo_tokens_threshold** | Option<**i32**> | | | +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | | | +**filter_curated_hits** | Option<**bool**> | | | +**enable_synonyms** | Option<**bool**> | | | +**synonym_prefix** | Option<**bool**> | | | 
+**synonym_num_typos** | Option<**i32**> | | | **pinned_hits** | Option<**String**> | | | -**pre_segmented_query** | Option<**bool**> | | | -**prefix** | Option<**String**> | | | +**hidden_hits** | Option<**String**> | | | +**override_tags** | Option<**String**> | | | +**highlight_fields** | Option<**String**> | | | +**pre_segmented_query** | Option<**bool**> | | |[default to false] **preset** | Option<**String**> | | | -**prioritize_exact_match** | Option<**bool**> | | | -**prioritize_num_matching_fields** | Option<**bool**> | | | -**prioritize_token_position** | Option<**bool**> | | | -**q** | Option<**String**> | | | -**query_by** | Option<**String**> | | | -**query_by_weights** | Option<**String**> | | | -**remote_embedding_num_tries** | Option<**i32**> | | | -**remote_embedding_timeout_ms** | Option<**i32**> | | | +**enable_overrides** | Option<**bool**> | | |[default to false] +**prioritize_exact_match** | Option<**bool**> | | |[default to true] +**prioritize_token_position** | Option<**bool**> | | |[default to false] +**prioritize_num_matching_fields** | Option<**bool**> | | |[default to true] +**enable_typos_for_numerical_tokens** | Option<**bool**> | | |[default to true] +**exhaustive_search** | Option<**bool**> | | | **search_cutoff_ms** | Option<**i32**> | | | -**snippet_threshold** | Option<**i32**> | | | -**sort_by** | Option<**String**> | | | -**split_join_tokens** | Option<**String**> | | | -**stopwords** | Option<**String**> | | | -**synonym_num_typos** | Option<**i32**> | | | -**synonym_prefix** | Option<**bool**> | | | -**text_match_type** | Option<**String**> | | | -**typo_tokens_threshold** | Option<**i32**> | | | **use_cache** | Option<**bool**> | | | +**cache_ttl** | Option<**i32**> | | | +**min_len_1typo** | Option<**i32**> | | | +**min_len_2typo** | Option<**i32**> | | | **vector_query** | Option<**String**> | | | +**remote_embedding_timeout_ms** | Option<**i32**> | | | +**remote_embedding_num_tries** | Option<**i32**> | | | +**facet_strategy** | Option<**String**> | | | +**stopwords** | Option<**String**> | | | +**facet_return_parent** | Option<**String**> | | | **voice_query** | Option<**String**> | | | +**conversation** | Option<**bool**> | | | +**conversation_model_id** | Option<**String**> | | | +**conversation_id** | Option<**String**> | | | **multi_search_searches_parameter** | Option<[**MultiSearchSearchesParameter**](MultiSearchSearchesParameter.md)> | | | ### Return type @@ -407,7 +403,7 @@ Name | Type | Description | Required | Notes ## search_collection -> models::SearchResult search_collection(collection_name, cache_ttl, conversation, conversation_id, conversation_model_id, drop_tokens_mode, drop_tokens_threshold, enable_highlight_v1, enable_overrides, enable_synonyms, enable_typos_for_alpha_numerical_tokens, enable_typos_for_numerical_tokens, exclude_fields, exhaustive_search, facet_by, facet_query, facet_return_parent, facet_strategy, filter_by, filter_curated_hits, group_by, group_limit, group_missing_values, hidden_hits, highlight_affix_num_tokens, highlight_end_tag, highlight_fields, highlight_full_fields, highlight_start_tag, include_fields, infix, limit, max_candidates, max_extra_prefix, max_extra_suffix, max_facet_values, max_filter_by_candidates, min_len_1typo, min_len_2typo, num_typos, offset, override_tags, page, per_page, pinned_hits, pre_segmented_query, prefix, preset, prioritize_exact_match, prioritize_num_matching_fields, prioritize_token_position, q, query_by, query_by_weights, remote_embedding_num_tries, remote_embedding_timeout_ms, 
search_cutoff_ms, snippet_threshold, sort_by, split_join_tokens, stopwords, synonym_num_typos, synonym_prefix, text_match_type, typo_tokens_threshold, use_cache, vector_query, voice_query) +> models::SearchResult search_collection(collection_name, q, query_by, nl_query, nl_model_id, query_by_weights, text_match_type, prefix, infix, max_extra_prefix, max_extra_suffix, filter_by, max_filter_by_candidates, sort_by, facet_by, max_facet_values, facet_query, num_typos, page, per_page, limit, offset, group_by, group_limit, group_missing_values, include_fields, exclude_fields, highlight_full_fields, highlight_affix_num_tokens, highlight_start_tag, highlight_end_tag, enable_highlight_v1, snippet_threshold, drop_tokens_threshold, drop_tokens_mode, typo_tokens_threshold, enable_typos_for_alpha_numerical_tokens, filter_curated_hits, enable_synonyms, synonym_prefix, synonym_num_typos, pinned_hits, hidden_hits, override_tags, highlight_fields, split_join_tokens, pre_segmented_query, preset, enable_overrides, prioritize_exact_match, max_candidates, prioritize_token_position, prioritize_num_matching_fields, enable_typos_for_numerical_tokens, exhaustive_search, search_cutoff_ms, use_cache, cache_ttl, min_len_1typo, min_len_2typo, vector_query, remote_embedding_timeout_ms, remote_embedding_num_tries, facet_strategy, stopwords, facet_return_parent, voice_query, conversation, conversation_model_id, conversation_id) Search for documents in a collection Search for documents in a collection that match the search criteria. @@ -418,73 +414,75 @@ Search for documents in a collection that match the search criteria. Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- **collection_name** | **String** | The name of the collection to search for the document under | [required] | -**cache_ttl** | Option<**i32**> | | | -**conversation** | Option<**bool**> | | | -**conversation_id** | Option<**String**> | | | -**conversation_model_id** | Option<**String**> | | | -**drop_tokens_mode** | Option<[**DropTokensMode**](.md)> | | | -**drop_tokens_threshold** | Option<**i32**> | | | -**enable_highlight_v1** | Option<**bool**> | | | -**enable_overrides** | Option<**bool**> | | | -**enable_synonyms** | Option<**bool**> | | | -**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | | | -**enable_typos_for_numerical_tokens** | Option<**bool**> | | | -**exclude_fields** | Option<**String**> | | | -**exhaustive_search** | Option<**bool**> | | | -**facet_by** | Option<**String**> | | | -**facet_query** | Option<**String**> | | | -**facet_return_parent** | Option<**String**> | | | -**facet_strategy** | Option<**String**> | | | -**filter_by** | Option<**String**> | | | -**filter_curated_hits** | Option<**bool**> | | | -**group_by** | Option<**String**> | | | -**group_limit** | Option<**i32**> | | | -**group_missing_values** | Option<**bool**> | | | -**hidden_hits** | Option<**String**> | | | -**highlight_affix_num_tokens** | Option<**i32**> | | | -**highlight_end_tag** | Option<**String**> | | | -**highlight_fields** | Option<**String**> | | | -**highlight_full_fields** | Option<**String**> | | | -**highlight_start_tag** | Option<**String**> | | | -**include_fields** | Option<**String**> | | | +**q** | Option<**String**> | | | +**query_by** | Option<**String**> | | | +**nl_query** | Option<**bool**> | | | +**nl_model_id** | Option<**String**> | | | +**query_by_weights** | Option<**String**> | | | +**text_match_type** | Option<**String**> | | | +**prefix** | 
Option<**String**> | | | **infix** | Option<**String**> | | | -**limit** | Option<**i32**> | | | -**max_candidates** | Option<**i32**> | | | **max_extra_prefix** | Option<**i32**> | | | **max_extra_suffix** | Option<**i32**> | | | -**max_facet_values** | Option<**i32**> | | | +**filter_by** | Option<**String**> | | | **max_filter_by_candidates** | Option<**i32**> | | | -**min_len_1typo** | Option<**i32**> | | | -**min_len_2typo** | Option<**i32**> | | | +**sort_by** | Option<**String**> | | | +**facet_by** | Option<**String**> | | | +**max_facet_values** | Option<**i32**> | | | +**facet_query** | Option<**String**> | | | **num_typos** | Option<**String**> | | | -**offset** | Option<**i32**> | | | -**override_tags** | Option<**String**> | | | **page** | Option<**i32**> | | | **per_page** | Option<**i32**> | | | +**limit** | Option<**i32**> | | | +**offset** | Option<**i32**> | | | +**group_by** | Option<**String**> | | | +**group_limit** | Option<**i32**> | | | +**group_missing_values** | Option<**bool**> | | | +**include_fields** | Option<**String**> | | | +**exclude_fields** | Option<**String**> | | | +**highlight_full_fields** | Option<**String**> | | | +**highlight_affix_num_tokens** | Option<**i32**> | | | +**highlight_start_tag** | Option<**String**> | | | +**highlight_end_tag** | Option<**String**> | | | +**enable_highlight_v1** | Option<**bool**> | | |[default to true] +**snippet_threshold** | Option<**i32**> | | | +**drop_tokens_threshold** | Option<**i32**> | | | +**drop_tokens_mode** | Option<[**DropTokensMode**](.md)> | | | +**typo_tokens_threshold** | Option<**i32**> | | | +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | | | +**filter_curated_hits** | Option<**bool**> | | | +**enable_synonyms** | Option<**bool**> | | | +**synonym_prefix** | Option<**bool**> | | | +**synonym_num_typos** | Option<**i32**> | | | **pinned_hits** | Option<**String**> | | | +**hidden_hits** | Option<**String**> | | | +**override_tags** | Option<**String**> | | | +**highlight_fields** | Option<**String**> | | | +**split_join_tokens** | Option<**String**> | | | **pre_segmented_query** | Option<**bool**> | | | -**prefix** | Option<**String**> | | | **preset** | Option<**String**> | | | -**prioritize_exact_match** | Option<**bool**> | | | -**prioritize_num_matching_fields** | Option<**bool**> | | | -**prioritize_token_position** | Option<**bool**> | | | -**q** | Option<**String**> | | | -**query_by** | Option<**String**> | | | -**query_by_weights** | Option<**String**> | | | -**remote_embedding_num_tries** | Option<**i32**> | | | -**remote_embedding_timeout_ms** | Option<**i32**> | | | +**enable_overrides** | Option<**bool**> | | |[default to false] +**prioritize_exact_match** | Option<**bool**> | | |[default to true] +**max_candidates** | Option<**i32**> | | | +**prioritize_token_position** | Option<**bool**> | | |[default to false] +**prioritize_num_matching_fields** | Option<**bool**> | | |[default to true] +**enable_typos_for_numerical_tokens** | Option<**bool**> | | |[default to true] +**exhaustive_search** | Option<**bool**> | | | **search_cutoff_ms** | Option<**i32**> | | | -**snippet_threshold** | Option<**i32**> | | | -**sort_by** | Option<**String**> | | | -**split_join_tokens** | Option<**String**> | | | -**stopwords** | Option<**String**> | | | -**synonym_num_typos** | Option<**i32**> | | | -**synonym_prefix** | Option<**bool**> | | | -**text_match_type** | Option<**String**> | | | -**typo_tokens_threshold** | Option<**i32**> | | | **use_cache** | Option<**bool**> | | | 
+**cache_ttl** | Option<**i32**> | | | +**min_len_1typo** | Option<**i32**> | | | +**min_len_2typo** | Option<**i32**> | | | **vector_query** | Option<**String**> | | | +**remote_embedding_timeout_ms** | Option<**i32**> | | | +**remote_embedding_num_tries** | Option<**i32**> | | | +**facet_strategy** | Option<**String**> | | | +**stopwords** | Option<**String**> | | | +**facet_return_parent** | Option<**String**> | | | **voice_query** | Option<**String**> | | | +**conversation** | Option<**bool**> | | | +**conversation_model_id** | Option<**String**> | | | +**conversation_id** | Option<**String**> | | | ### Return type diff --git a/typesense_codegen/docs/ExportDocumentsExportDocumentsParametersParameter.md b/typesense_codegen/docs/ExportDocumentsExportDocumentsParametersParameter.md new file mode 100644 index 0000000..553431f --- /dev/null +++ b/typesense_codegen/docs/ExportDocumentsExportDocumentsParametersParameter.md @@ -0,0 +1,13 @@ +# ExportDocumentsExportDocumentsParametersParameter + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**filter_by** | Option<**String**> | Filter conditions for refining your search results. Separate multiple conditions with &&. | [optional] +**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] +**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/ExportDocumentsParameters.md b/typesense_codegen/docs/ExportDocumentsParameters.md new file mode 100644 index 0000000..25c7dc7 --- /dev/null +++ b/typesense_codegen/docs/ExportDocumentsParameters.md @@ -0,0 +1,13 @@ +# ExportDocumentsParameters + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**filter_by** | Option<**String**> | Filter conditions for refining your search results. Separate multiple conditions with &&. 
| [optional] +**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] +**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/FacetCountsCountsInner.md b/typesense_codegen/docs/FacetCountsCountsInner.md index ebdef35..3733448 100644 --- a/typesense_codegen/docs/FacetCountsCountsInner.md +++ b/typesense_codegen/docs/FacetCountsCountsInner.md @@ -6,8 +6,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **count** | Option<**i32**> | | [optional] **highlighted** | Option<**String**> | | [optional] -**parent** | Option<[**serde_json::Value**](.md)> | | [optional] **value** | Option<**String**> | | [optional] +**parent** | Option<[**serde_json::Value**](.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/FacetCountsStats.md b/typesense_codegen/docs/FacetCountsStats.md index 3dd5e79..8917968 100644 --- a/typesense_codegen/docs/FacetCountsStats.md +++ b/typesense_codegen/docs/FacetCountsStats.md @@ -4,11 +4,11 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**avg** | Option<**f64**> | | [optional] **max** | Option<**f64**> | | [optional] **min** | Option<**f64**> | | [optional] **sum** | Option<**f64**> | | [optional] **total_values** | Option<**i32**> | | [optional] +**avg** | Option<**f64**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/Field.md b/typesense_codegen/docs/Field.md index fb5a6db..17afabd 100644 --- a/typesense_codegen/docs/Field.md +++ b/typesense_codegen/docs/Field.md @@ -4,25 +4,25 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**drop** | Option<**bool**> | | [optional] -**embed** | Option<[**models::FieldEmbed**](Field_embed.md)> | | [optional] +**name** | **String** | | +**r#type** | **String** | | +**optional** | Option<**bool**> | | [optional] **facet** | Option<**bool**> | | [optional] **index** | Option<**bool**> | | [optional][default to true] -**infix** | Option<**bool**> | | [optional][default to false] **locale** | Option<**String**> | | [optional] -**name** | **String** | | +**sort** | Option<**bool**> | | [optional] +**infix** | Option<**bool**> | | [optional][default to false] +**reference** | Option<**String**> | Name of a field in another collection that should be linked to this collection so that it can be joined during query. | [optional] **num_dim** | Option<**i32**> | | [optional] -**optional** | Option<**bool**> | | [optional] +**drop** | Option<**bool**> | | [optional] +**store** | Option<**bool**> | When set to false, the field value will not be stored on disk. Default: true. | [optional] +**vec_dist** | Option<**String**> | The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. | [optional] **range_index** | Option<**bool**> | Enables an index optimized for range filtering on numerical fields (e.g. 
rating:>3.5). Default: false. | [optional] -**reference** | Option<**String**> | Name of a field in another collection that should be linked to this collection so that it can be joined during query. | [optional] -**sort** | Option<**bool**> | | [optional] **stem** | Option<**bool**> | Values are stemmed before indexing in-memory. Default: false. | [optional] **stem_dictionary** | Option<**String**> | Name of the stemming dictionary to use for this field | [optional] -**store** | Option<**bool**> | When set to false, the field value will not be stored on disk. Default: true. | [optional] -**symbols_to_index** | Option<**Vec**> | List of symbols or special characters to be indexed. | [optional][default to []] **token_separators** | Option<**Vec**> | List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. | [optional][default to []] -**r#type** | **String** | | -**vec_dist** | Option<**String**> | The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. | [optional] +**symbols_to_index** | Option<**Vec**> | List of symbols or special characters to be indexed. | [optional][default to []] +**embed** | Option<[**models::FieldEmbed**](Field_embed.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/FieldEmbedModelConfig.md b/typesense_codegen/docs/FieldEmbedModelConfig.md index e19321e..8ee1235 100644 --- a/typesense_codegen/docs/FieldEmbedModelConfig.md +++ b/typesense_codegen/docs/FieldEmbedModelConfig.md @@ -4,16 +4,16 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**access_token** | Option<**String**> | | [optional] +**model_name** | **String** | | **api_key** | Option<**String**> | | [optional] +**url** | Option<**String**> | | [optional] +**access_token** | Option<**String**> | | [optional] +**refresh_token** | Option<**String**> | | [optional] **client_id** | Option<**String**> | | [optional] **client_secret** | Option<**String**> | | [optional] -**indexing_prefix** | Option<**String**> | | [optional] -**model_name** | **String** | | **project_id** | Option<**String**> | | [optional] +**indexing_prefix** | Option<**String**> | | [optional] **query_prefix** | Option<**String**> | | [optional] -**refresh_token** | Option<**String**> | | [optional] -**url** | Option<**String**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ImportDocumentsImportDocumentsParametersParameter.md b/typesense_codegen/docs/ImportDocumentsImportDocumentsParametersParameter.md new file mode 100644 index 0000000..185baaa --- /dev/null +++ b/typesense_codegen/docs/ImportDocumentsImportDocumentsParametersParameter.md @@ -0,0 +1,16 @@ +# ImportDocumentsImportDocumentsParametersParameter + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**batch_size** | Option<**i32**> | | [optional] +**return_id** | Option<**bool**> | Returning the id of the imported documents. If you want the import response to return the ingested document's id in the response, you can use the return_id parameter. 
| [optional] +**remote_embedding_batch_size** | Option<**i32**> | | [optional] +**return_doc** | Option<**bool**> | | [optional] +**action** | Option<[**models::IndexAction**](IndexAction.md)> | | [optional] +**dirty_values** | Option<[**models::DirtyValues**](DirtyValues.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/ImportDocumentsParameters.md b/typesense_codegen/docs/ImportDocumentsParameters.md new file mode 100644 index 0000000..67099eb --- /dev/null +++ b/typesense_codegen/docs/ImportDocumentsParameters.md @@ -0,0 +1,16 @@ +# ImportDocumentsParameters + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**batch_size** | Option<**i32**> | | [optional] +**return_id** | Option<**bool**> | Returning the id of the imported documents. If you want the import response to return the ingested document's id in the response, you can use the return_id parameter. | [optional] +**remote_embedding_batch_size** | Option<**i32**> | | [optional] +**return_doc** | Option<**bool**> | | [optional] +**action** | Option<[**models::IndexAction**](IndexAction.md)> | | [optional] +**dirty_values** | Option<[**models::DirtyValues**](DirtyValues.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/MultiSearchCollectionParameters.md b/typesense_codegen/docs/MultiSearchCollectionParameters.md index 2f65746..8a611d2 100644 --- a/typesense_codegen/docs/MultiSearchCollectionParameters.md +++ b/typesense_codegen/docs/MultiSearchCollectionParameters.md @@ -4,72 +4,72 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] -**conversation** | Option<**bool**> | Enable conversational search. | [optional] -**conversation_id** | Option<**String**> | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. | [optional] -**conversation_model_id** | Option<**String**> | The Id of Conversation Model to be used. | [optional] -**drop_tokens_mode** | Option<[**models::DropTokensMode**](DropTokensMode.md)> | | [optional] -**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] -**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional][default to false] -**enable_synonyms** | Option<**bool**> | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true | [optional] -**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. 
| [optional] -**enable_typos_for_numerical_tokens** | Option<**bool**> | Make Typesense disable typos for numerical tokens. | [optional][default to true] -**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] -**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] -**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] -**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] -**facet_return_parent** | Option<**String**> | Comma separated string of nested facet fields whose parent object should be returned in facet response. | [optional] -**facet_strategy** | Option<**String**> | Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). | [optional] -**filter_by** | Option<**String**> | Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. | [optional] -**filter_curated_hits** | Option<**bool**> | Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false | [optional] -**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] -**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] -**group_missing_values** | Option<**bool**> | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true | [optional] -**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] -**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] -**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. 
Default: `` | [optional] -**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] -**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] -**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] -**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] +**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] +**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] +**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] +**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] +**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. | [optional] **infix** | Option<**String**> | If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results | [optional] -**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] **max_extra_prefix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] **max_extra_suffix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] +**filter_by** | Option<**String**> | Filter conditions for refining your search results. Separate multiple conditions with &&. | [optional] +**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. 
If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] +**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] **max_facet_values** | Option<**i32**> | Maximum number of facet values to be returned. | [optional] -**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] -**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] +**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] **num_typos** | Option<**String**> | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 | [optional] -**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] -**override_tags** | Option<**String**> | Comma separated list of tags to trigger the curations rules that match the tags. | [optional] **page** | Option<**i32**> | Results from this specific page number would be fetched. | [optional] **per_page** | Option<**i32**> | Number of results to fetch per page. Default: 10 | [optional] +**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] +**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] +**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specifying one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] +**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] +**group_missing_values** | Option<**bool**> | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true | [optional] +**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] +**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] +**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] +**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] +**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] +**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. 
Default: `` | [optional] +**snippet_threshold** | Option<**i32**> | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 | [optional] +**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] +**drop_tokens_mode** | Option<[**models::DropTokensMode**](DropTokensMode.md)> | | [optional] +**typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 | [optional] +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. | [optional] +**filter_curated_hits** | Option<**bool**> | Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false | [optional] +**enable_synonyms** | Option<**bool**> | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true | [optional] +**synonym_prefix** | Option<**bool**> | Allow synonym resolution on word prefixes in the query. Default: false | [optional] +**synonym_num_typos** | Option<**i32**> | Allow synonym resolution on typo-corrected words in the query. Default: 0 | [optional] **pinned_hits** | Option<**String**> | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] +**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] +**override_tags** | Option<**String**> | Comma separated list of tags to trigger the curations rules that match the tags. | [optional] +**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] **pre_segmented_query** | Option<**bool**> | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same | [optional][default to false] -**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. 
| [optional] **preset** | Option<**String**> | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. | [optional] +**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional][default to false] **prioritize_exact_match** | Option<**bool**> | Set this parameter to true to ensure that an exact match is ranked above the others | [optional][default to true] -**prioritize_num_matching_fields** | Option<**bool**> | Make Typesense prioritize documents where the query words appear in more number of fields. | [optional][default to true] **prioritize_token_position** | Option<**bool**> | Make Typesense prioritize documents where the query words appear earlier in the text. | [optional][default to false] -**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] -**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] -**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] -**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] -**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] +**prioritize_num_matching_fields** | Option<**bool**> | Make Typesense prioritize documents where the query words appear in more number of fields. | [optional][default to true] +**enable_typos_for_numerical_tokens** | Option<**bool**> | Make Typesense disable typos for numerical tokens. | [optional][default to true] +**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] **search_cutoff_ms** | Option<**i32**> | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. | [optional] -**snippet_threshold** | Option<**i32**> | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 | [optional] -**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] -**stopwords** | Option<**String**> | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. | [optional] -**synonym_num_typos** | Option<**i32**> | Allow synonym resolution on typo-corrected words in the query. 
Default: 0 | [optional] -**synonym_prefix** | Option<**bool**> | Allow synonym resolution on word prefixes in the query. Default: false | [optional] -**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] -**typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 | [optional] **use_cache** | Option<**bool**> | Enable server side caching of search query results. By default, caching is disabled. | [optional] +**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] +**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] +**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] **vector_query** | Option<**String**> | Vector query expression for fetching documents \"closest\" to a given query/document vector. | [optional] +**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] +**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] +**facet_strategy** | Option<**String**> | Choose the underlying faceting strategy used. Comma separated string of allowed values: exhaustive, top_values or automatic (default). | [optional] +**stopwords** | Option<**String**> | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. | [optional] +**facet_return_parent** | Option<**String**> | Comma separated string of nested facet fields whose parent object should be returned in facet response. | [optional] **voice_query** | Option<**String**> | The base64 encoded audio file in 16 khz 16-bit WAV format. | [optional] +**conversation** | Option<**bool**> | Enable conversational search. | [optional] +**conversation_model_id** | Option<**String**> | The Id of Conversation Model to be used. | [optional] +**conversation_id** | Option<**String**> | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. | [optional] **collection** | Option<**String**> | The collection to search in. | [optional] -**rerank_hybrid_matches** | Option<**bool**> | When true, computes both text match and vector distance scores for all matches in hybrid search. Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. | [optional][default to false] **x_typesense_api_key** | Option<**String**> | A separate search API key for each search within a multi_search request | [optional] +**rerank_hybrid_matches** | Option<**bool**> | When true, computes both text match and vector distance scores for all matches in hybrid search. Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. 
| [optional][default to false] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/MultiSearchParameters.md b/typesense_codegen/docs/MultiSearchParameters.md index 7f569f4..08c363b 100644 --- a/typesense_codegen/docs/MultiSearchParameters.md +++ b/typesense_codegen/docs/MultiSearchParameters.md @@ -4,69 +4,69 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] -**conversation** | Option<**bool**> | Enable conversational search. | [optional] -**conversation_id** | Option<**String**> | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. | [optional] -**conversation_model_id** | Option<**String**> | The Id of Conversation Model to be used. | [optional] -**drop_tokens_mode** | Option<[**models::DropTokensMode**](DropTokensMode.md)> | | [optional] -**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] -**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional][default to false] -**enable_synonyms** | Option<**bool**> | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true | [optional] -**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. | [optional] -**enable_typos_for_numerical_tokens** | Option<**bool**> | Make Typesense disable typos for numerical tokens. | [optional][default to true] -**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] -**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] -**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] -**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] -**facet_return_parent** | Option<**String**> | Comma separated string of nested facet fields whose parent object should be returned in facet response. | [optional] -**facet_strategy** | Option<**String**> | Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). 
| [optional] -**filter_by** | Option<**String**> | Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. | [optional] -**filter_curated_hits** | Option<**bool**> | Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false | [optional] -**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] -**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] -**group_missing_values** | Option<**bool**> | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true | [optional] -**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] -**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] -**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] -**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] -**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] -**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] -**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] +**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] +**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] +**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] +**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] +**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. 
| [optional] **infix** | Option<**String**> | If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results | [optional] -**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] **max_extra_prefix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] **max_extra_suffix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] +**filter_by** | Option<**String**> | Filter conditions for refining your search results. Separate multiple conditions with &&. | [optional] +**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] +**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] **max_facet_values** | Option<**i32**> | Maximum number of facet values to be returned. | [optional] +**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] **num_typos** | Option<**String**> | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 | [optional] -**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] -**override_tags** | Option<**String**> | Comma separated list of tags to trigger the curations rules that match the tags. | [optional] **page** | Option<**i32**> | Results from this specific page number would be fetched. | [optional] **per_page** | Option<**i32**> | Number of results to fetch per page. 
Default: 10 | [optional] +**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] +**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] +**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specifying one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] +**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] +**group_missing_values** | Option<**bool**> | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true | [optional] +**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] +**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] +**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] +**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] +**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] +**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] +**snippet_threshold** | Option<**i32**> | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 | [optional] +**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] +**drop_tokens_mode** | Option<[**models::DropTokensMode**](DropTokensMode.md)> | | [optional] +**typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 | [optional] +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. | [optional] +**filter_curated_hits** | Option<**bool**> | Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false | [optional] +**enable_synonyms** | Option<**bool**> | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true | [optional] +**synonym_prefix** | Option<**bool**> | Allow synonym resolution on word prefixes in the query. Default: false | [optional] +**synonym_num_typos** | Option<**i32**> | Allow synonym resolution on typo-corrected words in the query. 
Default: 0 | [optional] **pinned_hits** | Option<**String**> | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] +**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] +**override_tags** | Option<**String**> | Comma separated list of tags to trigger the curations rules that match the tags. | [optional] +**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] **pre_segmented_query** | Option<**bool**> | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same | [optional][default to false] -**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. | [optional] **preset** | Option<**String**> | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. | [optional] +**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional][default to false] **prioritize_exact_match** | Option<**bool**> | Set this parameter to true to ensure that an exact match is ranked above the others | [optional][default to true] -**prioritize_num_matching_fields** | Option<**bool**> | Make Typesense prioritize documents where the query words appear in more number of fields. | [optional][default to true] **prioritize_token_position** | Option<**bool**> | Make Typesense prioritize documents where the query words appear earlier in the text. | [optional][default to false] -**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] -**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] -**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] -**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] -**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. 
| [optional] +**prioritize_num_matching_fields** | Option<**bool**> | Make Typesense prioritize documents where the query words appear in more number of fields. | [optional][default to true] +**enable_typos_for_numerical_tokens** | Option<**bool**> | Make Typesense disable typos for numerical tokens. | [optional][default to true] +**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] **search_cutoff_ms** | Option<**i32**> | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. | [optional] -**snippet_threshold** | Option<**i32**> | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 | [optional] -**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] -**stopwords** | Option<**String**> | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. | [optional] -**synonym_num_typos** | Option<**i32**> | Allow synonym resolution on typo-corrected words in the query. Default: 0 | [optional] -**synonym_prefix** | Option<**bool**> | Allow synonym resolution on word prefixes in the query. Default: false | [optional] -**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] -**typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 | [optional] **use_cache** | Option<**bool**> | Enable server side caching of search query results. By default, caching is disabled. | [optional] +**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] +**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] +**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] **vector_query** | Option<**String**> | Vector query expression for fetching documents \"closest\" to a given query/document vector. | [optional] +**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] +**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] +**facet_strategy** | Option<**String**> | Choose the underlying faceting strategy used. 
Comma separated string of allowed values: exhaustive, top_values or automatic (default). | [optional] +**stopwords** | Option<**String**> | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. | [optional] +**facet_return_parent** | Option<**String**> | Comma separated string of nested facet fields whose parent object should be returned in facet response. | [optional] **voice_query** | Option<**String**> | The base64 encoded audio file in 16 khz 16-bit WAV format. | [optional] +**conversation** | Option<**bool**> | Enable conversational search. | [optional] +**conversation_model_id** | Option<**String**> | The Id of Conversation Model to be used. | [optional] +**conversation_id** | Option<**String**> | The Id of a previous conversation to continue; this tells Typesense to include prior context when communicating with the LLM. | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/MultiSearchResult.md b/typesense_codegen/docs/MultiSearchResult.md index ae61e32..55bad9d 100644 --- a/typesense_codegen/docs/MultiSearchResult.md +++ b/typesense_codegen/docs/MultiSearchResult.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**conversation** | Option<[**models::SearchResultConversation**](SearchResultConversation.md)> | | [optional] **results** | [**Vec**](MultiSearchResultItem.md) | | +**conversation** | Option<[**models::SearchResultConversation**](SearchResultConversation.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/MultiSearchResultItem.md b/typesense_codegen/docs/MultiSearchResultItem.md index 2ba4735..7d88f4c 100644 --- a/typesense_codegen/docs/MultiSearchResultItem.md +++ b/typesense_codegen/docs/MultiSearchResultItem.md @@ -4,17 +4,17 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**conversation** | Option<[**models::SearchResultConversation**](SearchResultConversation.md)> | | [optional] **facet_counts** | Option<[**Vec**](FacetCounts.md)> | | [optional] **found** | Option<**i32**> | The number of documents found | [optional] **found_docs** | Option<**i32**> | | [optional] -**grouped_hits** | Option<[**Vec**](SearchGroupedHit.md)> | | [optional] -**hits** | Option<[**Vec**](SearchResultHit.md)> | The documents that matched the search query | [optional] +**search_time_ms** | Option<**i32**> | The number of milliseconds the search took | [optional] **out_of** | Option<**i32**> | The total number of documents in the collection | [optional] +**search_cutoff** | Option<**bool**> | Whether the search was cut off | [optional] **page** | Option<**i32**> | The search result page number | [optional] +**grouped_hits** | Option<[**Vec**](SearchGroupedHit.md)> | | [optional] +**hits** | Option<[**Vec**](SearchResultHit.md)> | The documents that matched the search query | [optional] **request_params** | Option<[**models::SearchResultRequestParams**](SearchResult_request_params.md)> | | [optional] -**search_cutoff** | Option<**bool**> | Whether the search was cut off | [optional] -**search_time_ms** | Option<**i32**> | The number of milliseconds the search took | [optional] +**conversation** | 
Option<[**models::SearchResultConversation**](SearchResultConversation.md)> | | [optional] **code** | Option<**i64**> | HTTP error code | [optional] **error** | Option<**String**> | Error description | [optional] diff --git a/typesense_codegen/docs/MultiSearchSearchesParameter.md b/typesense_codegen/docs/MultiSearchSearchesParameter.md index 3b34942..0b71465 100644 --- a/typesense_codegen/docs/MultiSearchSearchesParameter.md +++ b/typesense_codegen/docs/MultiSearchSearchesParameter.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**searches** | [**Vec**](MultiSearchCollectionParameters.md) | | **union** | Option<**bool**> | When true, merges the search results from each search query into a single ordered set of hits. | [optional] +**searches** | [**Vec**](MultiSearchCollectionParameters.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/NlSearchModelBase.md b/typesense_codegen/docs/NlSearchModelBase.md new file mode 100644 index 0000000..b331895 --- /dev/null +++ b/typesense_codegen/docs/NlSearchModelBase.md @@ -0,0 +1,28 @@ +# NlSearchModelBase + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**model_name** | Option<**String**> | Name of the NL model to use | [optional] +**api_key** | Option<**String**> | API key for the NL model service | [optional] +**api_url** | Option<**String**> | Custom API URL for the NL model service | [optional] +**max_bytes** | Option<**i32**> | Maximum number of bytes to process | [optional] +**temperature** | Option<**f64**> | Temperature parameter for the NL model | [optional] +**system_prompt** | Option<**String**> | System prompt for the NL model | [optional] +**top_p** | Option<**f64**> | Top-p parameter for the NL model (Google-specific) | [optional] +**top_k** | Option<**i32**> | Top-k parameter for the NL model (Google-specific) | [optional] +**stop_sequences** | Option<**Vec**> | Stop sequences for the NL model (Google-specific) | [optional] +**api_version** | Option<**String**> | API version for the NL model service | [optional] +**project_id** | Option<**String**> | Project ID for GCP Vertex AI | [optional] +**access_token** | Option<**String**> | Access token for GCP Vertex AI | [optional] +**refresh_token** | Option<**String**> | Refresh token for GCP Vertex AI | [optional] +**client_id** | Option<**String**> | Client ID for GCP Vertex AI | [optional] +**client_secret** | Option<**String**> | Client secret for GCP Vertex AI | [optional] +**region** | Option<**String**> | Region for GCP Vertex AI | [optional] +**max_output_tokens** | Option<**i32**> | Maximum output tokens for GCP Vertex AI | [optional] +**account_id** | Option<**String**> | Account ID for Cloudflare-specific models | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/NlSearchModelCreateSchema.md b/typesense_codegen/docs/NlSearchModelCreateSchema.md new file mode 100644 index 0000000..6690186 --- /dev/null +++ b/typesense_codegen/docs/NlSearchModelCreateSchema.md @@ -0,0 +1,29 @@ +# NlSearchModelCreateSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**model_name** 
| Option<**String**> | Name of the NL model to use | [optional] +**api_key** | Option<**String**> | API key for the NL model service | [optional] +**api_url** | Option<**String**> | Custom API URL for the NL model service | [optional] +**max_bytes** | Option<**i32**> | Maximum number of bytes to process | [optional] +**temperature** | Option<**f64**> | Temperature parameter for the NL model | [optional] +**system_prompt** | Option<**String**> | System prompt for the NL model | [optional] +**top_p** | Option<**f64**> | Top-p parameter for the NL model (Google-specific) | [optional] +**top_k** | Option<**i32**> | Top-k parameter for the NL model (Google-specific) | [optional] +**stop_sequences** | Option<**Vec**> | Stop sequences for the NL model (Google-specific) | [optional] +**api_version** | Option<**String**> | API version for the NL model service | [optional] +**project_id** | Option<**String**> | Project ID for GCP Vertex AI | [optional] +**access_token** | Option<**String**> | Access token for GCP Vertex AI | [optional] +**refresh_token** | Option<**String**> | Refresh token for GCP Vertex AI | [optional] +**client_id** | Option<**String**> | Client ID for GCP Vertex AI | [optional] +**client_secret** | Option<**String**> | Client secret for GCP Vertex AI | [optional] +**region** | Option<**String**> | Region for GCP Vertex AI | [optional] +**max_output_tokens** | Option<**i32**> | Maximum output tokens for GCP Vertex AI | [optional] +**account_id** | Option<**String**> | Account ID for Cloudflare-specific models | [optional] +**id** | Option<**String**> | Optional ID for the NL search model | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/NlSearchModelDeleteSchema.md b/typesense_codegen/docs/NlSearchModelDeleteSchema.md new file mode 100644 index 0000000..97be1ac --- /dev/null +++ b/typesense_codegen/docs/NlSearchModelDeleteSchema.md @@ -0,0 +1,11 @@ +# NlSearchModelDeleteSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **String** | ID of the deleted NL search model | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/NlSearchModelSchema.md b/typesense_codegen/docs/NlSearchModelSchema.md new file mode 100644 index 0000000..24f870f --- /dev/null +++ b/typesense_codegen/docs/NlSearchModelSchema.md @@ -0,0 +1,29 @@ +# NlSearchModelSchema + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**model_name** | Option<**String**> | Name of the NL model to use | [optional] +**api_key** | Option<**String**> | API key for the NL model service | [optional] +**api_url** | Option<**String**> | Custom API URL for the NL model service | [optional] +**max_bytes** | Option<**i32**> | Maximum number of bytes to process | [optional] +**temperature** | Option<**f64**> | Temperature parameter for the NL model | [optional] +**system_prompt** | Option<**String**> | System prompt for the NL model | [optional] +**top_p** | Option<**f64**> | Top-p parameter for the NL model (Google-specific) | [optional] +**top_k** | Option<**i32**> | Top-k parameter for the NL model (Google-specific) | [optional] +**stop_sequences** | Option<**Vec**> | Stop 
sequences for the NL model (Google-specific) | [optional] +**api_version** | Option<**String**> | API version for the NL model service | [optional] +**project_id** | Option<**String**> | Project ID for GCP Vertex AI | [optional] +**access_token** | Option<**String**> | Access token for GCP Vertex AI | [optional] +**refresh_token** | Option<**String**> | Refresh token for GCP Vertex AI | [optional] +**client_id** | Option<**String**> | Client ID for GCP Vertex AI | [optional] +**client_secret** | Option<**String**> | Client secret for GCP Vertex AI | [optional] +**region** | Option<**String**> | Region for GCP Vertex AI | [optional] +**max_output_tokens** | Option<**i32**> | Maximum output tokens for GCP Vertex AI | [optional] +**account_id** | Option<**String**> | Account ID for Cloudflare-specific models | [optional] +**id** | **String** | ID of the NL search model | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/NlSearchModelsApi.md b/typesense_codegen/docs/NlSearchModelsApi.md new file mode 100644 index 0000000..5608643 --- /dev/null +++ b/typesense_codegen/docs/NlSearchModelsApi.md @@ -0,0 +1,161 @@ +# \NlSearchModelsApi + +All URIs are relative to *http://localhost* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**create_nl_search_model**](NlSearchModelsApi.md#create_nl_search_model) | **POST** /nl_search_models | Create a NL search model +[**delete_nl_search_model**](NlSearchModelsApi.md#delete_nl_search_model) | **DELETE** /nl_search_models/{modelId} | Delete a NL search model +[**retrieve_all_nl_search_models**](NlSearchModelsApi.md#retrieve_all_nl_search_models) | **GET** /nl_search_models | List all NL search models +[**retrieve_nl_search_model**](NlSearchModelsApi.md#retrieve_nl_search_model) | **GET** /nl_search_models/{modelId} | Retrieve a NL search model +[**update_nl_search_model**](NlSearchModelsApi.md#update_nl_search_model) | **PUT** /nl_search_models/{modelId} | Update a NL search model + + + +## create_nl_search_model + +> models::NlSearchModelSchema create_nl_search_model(nl_search_model_create_schema) +Create a NL search model + +Create a new NL search model. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**nl_search_model_create_schema** | [**NlSearchModelCreateSchema**](NlSearchModelCreateSchema.md) | The NL search model to be created | [required] | + +### Return type + +[**models::NlSearchModelSchema**](NLSearchModelSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## delete_nl_search_model + +> models::NlSearchModelDeleteSchema delete_nl_search_model(model_id) +Delete a NL search model + +Delete a specific NL search model by its ID. 
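The endpoints in this new NlSearchModelsApi doc are thin wrappers over the corresponding HTTP calls. For orientation, here is a minimal sketch of creating and later deleting a model; it assumes the generated module is `typesense_codegen::apis::nl_search_models_api`, that the functions are async and take a `&Configuration` followed by the documented positional parameters (the convention the other generated APIs in this patch follow), that `NlSearchModelCreateSchema::new()` builds an all-optional body, and that the `Configuration`/`ApiKey` fields match the shared `configuration.rs`. The model name, id, and keys are placeholders, not values from this patch.

```rust
use typesense_codegen::apis::configuration::{ApiKey, Configuration};
use typesense_codegen::apis::nl_search_models_api;
use typesense_codegen::models::NlSearchModelCreateSchema;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Point the generated client at a Typesense node and authenticate with the
    // admin API key (api_key_header auth, as referenced in the docs above).
    let config = Configuration {
        base_path: "http://localhost:8108".to_owned(),
        api_key: Some(ApiKey { prefix: None, key: "xyz".to_owned() }),
        ..Default::default()
    };

    // Every field of the create schema is optional, so set only what is needed.
    let mut schema = NlSearchModelCreateSchema::new();
    schema.id = Some("my-nl-model".to_owned());                // placeholder id
    schema.model_name = Some("openai/gpt-4o-mini".to_owned()); // placeholder model
    schema.api_key = Some(std::env::var("LLM_API_KEY")?);      // key for the NL model service
    schema.max_bytes = Some(16_000);

    // POST /nl_search_models
    let created = nl_search_models_api::create_nl_search_model(&config, schema).await?;
    println!("created NL search model {}", created.id);

    // DELETE /nl_search_models/{modelId}
    let deleted = nl_search_models_api::delete_nl_search_model(&config, &created.id).await?;
    println!("deleted NL search model {}", deleted.id);
    Ok(())
}
```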
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**model_id** | **String** | The ID of the NL search model to delete | [required] | + +### Return type + +[**models::NlSearchModelDeleteSchema**](NLSearchModelDeleteSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_all_nl_search_models + +> Vec retrieve_all_nl_search_models() +List all NL search models + +Retrieve all NL search models. + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +[**Vec**](NLSearchModelSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## retrieve_nl_search_model + +> models::NlSearchModelSchema retrieve_nl_search_model(model_id) +Retrieve a NL search model + +Retrieve a specific NL search model by its ID. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**model_id** | **String** | The ID of the NL search model to retrieve | [required] | + +### Return type + +[**models::NlSearchModelSchema**](NLSearchModelSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## update_nl_search_model + +> models::NlSearchModelSchema update_nl_search_model(model_id, body) +Update a NL search model + +Update an existing NL search model. 
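Retrieval and update follow the same pattern; the update body reuses `NlSearchModelCreateSchema`, as the parameter table below notes. A sketch under the same naming and signature assumptions as the example above, with a hypothetical helper function:

```rust
use typesense_codegen::apis::configuration::Configuration;
use typesense_codegen::apis::nl_search_models_api;
use typesense_codegen::models::NlSearchModelCreateSchema;

/// Fetch a model by its ID, then lower its sampling temperature.
async fn lower_temperature(
    config: &Configuration,
    model_id: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    // GET /nl_search_models/{modelId}
    let current = nl_search_models_api::retrieve_nl_search_model(config, model_id).await?;
    println!("current temperature: {:?}", current.temperature);

    // PUT /nl_search_models/{modelId}: only the fields being changed need to be set.
    let mut body = NlSearchModelCreateSchema::new();
    body.temperature = Some(0.2);
    let updated = nl_search_models_api::update_nl_search_model(config, model_id, body).await?;
    println!("model {} now has temperature {:?}", updated.id, updated.temperature);
    Ok(())
}
```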
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**model_id** | **String** | The ID of the NL search model to update | [required] | +**body** | **models::NlSearchModelCreateSchema** | The NL search model fields to update | [required] | + +### Return type + +[**models::NlSearchModelSchema**](NLSearchModelSchema.md) + +### Authorization + +[api_key_header](../README.md#api_key_header) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/typesense_codegen/docs/SchemaChangeStatus.md b/typesense_codegen/docs/SchemaChangeStatus.md index 60ec4dd..5a87798 100644 --- a/typesense_codegen/docs/SchemaChangeStatus.md +++ b/typesense_codegen/docs/SchemaChangeStatus.md @@ -4,9 +4,9 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**altered_docs** | Option<**i32**> | Number of documents that have been altered | [optional] **collection** | Option<**String**> | Name of the collection being modified | [optional] **validated_docs** | Option<**i32**> | Number of documents that have been validated | [optional] +**altered_docs** | Option<**i32**> | Number of documents that have been altered | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/ScopedKeyParameters.md b/typesense_codegen/docs/ScopedKeyParameters.md index 791d83b..e29c05a 100644 --- a/typesense_codegen/docs/ScopedKeyParameters.md +++ b/typesense_codegen/docs/ScopedKeyParameters.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**expires_at** | Option<**i64**> | | [optional] **filter_by** | Option<**String**> | | [optional] +**expires_at** | Option<**i64**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchHighlight.md b/typesense_codegen/docs/SearchHighlight.md index 7b2b5a7..7ae7a33 100644 --- a/typesense_codegen/docs/SearchHighlight.md +++ b/typesense_codegen/docs/SearchHighlight.md @@ -5,12 +5,12 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **field** | Option<**String**> | | [optional] -**indices** | Option<**Vec**> | The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field | [optional] -**matched_tokens** | Option<[**Vec**](serde_json::Value.md)> | | [optional] **snippet** | Option<**String**> | Present only for (non-array) string fields | [optional] **snippets** | Option<**Vec**> | Present only for (array) string[] fields | [optional] **value** | Option<**String**> | Full field value with highlighting, present only for (non-array) string fields | [optional] **values** | Option<**Vec**> | Full field value with highlighting, present only for (array) string[] fields | [optional] +**indices** | Option<**Vec**> | The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the 
search field | [optional] +**matched_tokens** | Option<[**Vec**](serde_json::Value.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchOverride.md b/typesense_codegen/docs/SearchOverride.md index 356631c..1204992 100644 --- a/typesense_codegen/docs/SearchOverride.md +++ b/typesense_codegen/docs/SearchOverride.md @@ -4,17 +4,17 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**effective_from_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. | [optional] -**effective_to_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. | [optional] +**rule** | [**models::SearchOverrideRule**](SearchOverrideRule.md) | | +**includes** | Option<[**Vec**](SearchOverrideInclude.md)> | List of document `id`s that should be included in the search results with their corresponding `position`s. | [optional] **excludes** | Option<[**Vec**](SearchOverrideExclude.md)> | List of document `id`s that should be excluded from the search results. | [optional] **filter_by** | Option<**String**> | A filter by clause that is applied to any search query that matches the override rule. | [optional] -**filter_curated_hits** | Option<**bool**> | When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. | [optional] -**includes** | Option<[**Vec**](SearchOverrideInclude.md)> | List of document `id`s that should be included in the search results with their corresponding `position`s. | [optional] -**metadata** | Option<[**serde_json::Value**](.md)> | Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. | [optional] **remove_matched_tokens** | Option<**bool**> | Indicates whether search query tokens that exist in the override's rule should be removed from the search query. | [optional] -**replace_query** | Option<**String**> | Replaces the current search query with this value, when the search query matches the override rule. | [optional] -**rule** | [**models::SearchOverrideRule**](SearchOverrideRule.md) | | +**metadata** | Option<[**serde_json::Value**](.md)> | Return a custom JSON object in the Search API response, when this rule is triggered. This can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. | [optional] **sort_by** | Option<**String**> | A sort by clause that is applied to any search query that matches the override rule. | [optional] +**replace_query** | Option<**String**> | Replaces the current search query with this value, when the search query matches the override rule. | [optional] +**filter_curated_hits** | Option<**bool**> | When set to true, the filter conditions of the query are applied to the curated records as well. Default: false. | [optional] +**effective_from_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time from which the override will be active. 
You can use this to create override rules that start applying from a future point in time. | [optional] +**effective_to_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. | [optional] **stop_processing** | Option<**bool**> | When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. | [optional] **id** | **String** | | [readonly] diff --git a/typesense_codegen/docs/SearchOverrideRule.md b/typesense_codegen/docs/SearchOverrideRule.md index 8af3a3d..2234944 100644 --- a/typesense_codegen/docs/SearchOverrideRule.md +++ b/typesense_codegen/docs/SearchOverrideRule.md @@ -4,10 +4,10 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**filter_by** | Option<**String**> | Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). | [optional] -**r#match** | Option<**String**> | Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. | [optional] -**query** | Option<**String**> | Indicates what search queries should be overridden | [optional] **tags** | Option<**Vec**> | List of tag values to associate with this override rule. | [optional] +**query** | Option<**String**> | Indicates what search queries should be overridden | [optional] +**r#match** | Option<**String**> | Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. | [optional] +**filter_by** | Option<**String**> | Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchOverrideSchema.md b/typesense_codegen/docs/SearchOverrideSchema.md index b941538..4da9648 100644 --- a/typesense_codegen/docs/SearchOverrideSchema.md +++ b/typesense_codegen/docs/SearchOverrideSchema.md @@ -4,17 +4,17 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**effective_from_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. | [optional] -**effective_to_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. | [optional] +**rule** | [**models::SearchOverrideRule**](SearchOverrideRule.md) | | +**includes** | Option<[**Vec**](SearchOverrideInclude.md)> | List of document `id`s that should be included in the search results with their corresponding `position`s. 
| [optional] **excludes** | Option<[**Vec**](SearchOverrideExclude.md)> | List of document `id`s that should be excluded from the search results. | [optional] **filter_by** | Option<**String**> | A filter by clause that is applied to any search query that matches the override rule. | [optional] -**filter_curated_hits** | Option<**bool**> | When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. | [optional] -**includes** | Option<[**Vec**](SearchOverrideInclude.md)> | List of document `id`s that should be included in the search results with their corresponding `position`s. | [optional] -**metadata** | Option<[**serde_json::Value**](.md)> | Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. | [optional] **remove_matched_tokens** | Option<**bool**> | Indicates whether search query tokens that exist in the override's rule should be removed from the search query. | [optional] +**metadata** | Option<[**serde_json::Value**](.md)> | Return a custom JSON object in the Search API response, when this rule is triggered. This can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. | [optional] **sort_by** | Option<**String**> | A sort by clause that is applied to any search query that matches the override rule. | [optional] +**replace_query** | Option<**String**> | Replaces the current search query with this value, when the search query matches the override rule. | [optional] +**filter_curated_hits** | Option<**bool**> | When set to true, the filter conditions of the query are applied to the curated records as well. Default: false. | [optional] +**effective_from_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. | [optional] +**effective_to_ts** | Option<**i32**> | A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. | [optional] **stop_processing** | Option<**bool**> | When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchParameters.md b/typesense_codegen/docs/SearchParameters.md index a27ef45..578cd4e 100644 --- a/typesense_codegen/docs/SearchParameters.md +++ b/typesense_codegen/docs/SearchParameters.md @@ -4,73 +4,75 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. 
| [optional] -**conversation** | Option<**bool**> | Enable conversational search. | [optional] -**conversation_id** | Option<**String**> | The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. | [optional] -**conversation_model_id** | Option<**String**> | The Id of Conversation Model to be used. | [optional] -**drop_tokens_mode** | Option<[**models::DropTokensMode**](DropTokensMode.md)> | | [optional] -**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] -**enable_highlight_v1** | Option<**bool**> | Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true | [optional][default to true] -**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional][default to false] -**enable_synonyms** | Option<**bool**> | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true | [optional] -**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. | [optional] -**enable_typos_for_numerical_tokens** | Option<**bool**> | Make Typesense disable typos for numerical tokens. | [optional][default to true] -**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] -**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] -**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] -**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] -**facet_return_parent** | Option<**String**> | Comma separated string of nested facet fields whose parent object should be returned in facet response. | [optional] -**facet_strategy** | Option<**String**> | Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). | [optional] -**filter_by** | Option<**String**> | Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. | [optional] -**filter_curated_hits** | Option<**bool**> | Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false | [optional] -**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. 
| [optional] -**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] -**group_missing_values** | Option<**bool**> | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true | [optional] -**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] -**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] -**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] -**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] -**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] -**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] -**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] +**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. | [optional] +**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] +**nl_query** | Option<**bool**> | Whether to use natural language processing to parse the query. | [optional] +**nl_model_id** | Option<**String**> | The ID of the natural language model to use. | [optional] +**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] +**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] +**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. | [optional] **infix** | Option<**String**> | If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results | [optional] -**limit** | Option<**i32**> | Number of hits to fetch. 
Can be used as an alternative to the per_page parameter. Default: 10. | [optional] -**max_candidates** | Option<**i32**> | Control the number of words that Typesense considers for typo and prefix searching. | [optional] **max_extra_prefix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] **max_extra_suffix** | Option<**i32**> | There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. | [optional] -**max_facet_values** | Option<**i32**> | Maximum number of facet values to be returned. | [optional] +**filter_by** | Option<**String**> | Filter conditions for refining your search results. Separate multiple conditions with &&. | [optional] **max_filter_by_candidates** | Option<**i32**> | Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. | [optional] -**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] -**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] +**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] +**facet_by** | Option<**String**> | A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. | [optional] +**max_facet_values** | Option<**i32**> | Maximum number of facet values to be returned. | [optional] +**facet_query** | Option<**String**> | Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". | [optional] **num_typos** | Option<**String**> | The number of typographical errors (1 or 2) that would be tolerated. Default: 2 | [optional] -**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] -**override_tags** | Option<**String**> | Comma separated list of tags to trigger the curations rules that match the tags. | [optional] **page** | Option<**i32**> | Results from this specific page number would be fetched. | [optional] **per_page** | Option<**i32**> | Number of results to fetch per page. 
Default: 10 | [optional] +**limit** | Option<**i32**> | Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. | [optional] +**offset** | Option<**i32**> | Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. | [optional] +**group_by** | Option<**String**> | You can aggregate search results into groups or buckets by specifying one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. | [optional] +**group_limit** | Option<**i32**> | Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 | [optional] +**group_missing_values** | Option<**bool**> | Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true | [optional] +**include_fields** | Option<**String**> | List of fields from the document to include in the search result | [optional] +**exclude_fields** | Option<**String**> | List of fields from the document to exclude in the search result | [optional] +**highlight_full_fields** | Option<**String**> | List of fields which should be highlighted fully without snippeting | [optional] +**highlight_affix_num_tokens** | Option<**i32**> | The number of tokens that should surround the highlighted text on each side. Default: 4 | [optional] +**highlight_start_tag** | Option<**String**> | The start tag used for the highlighted snippets. Default: `` | [optional] +**highlight_end_tag** | Option<**String**> | The end tag used for the highlighted snippets. Default: `` | [optional] +**enable_highlight_v1** | Option<**bool**> | Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true | [optional][default to true] +**snippet_threshold** | Option<**i32**> | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 | [optional] +**drop_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 | [optional] +**drop_tokens_mode** | Option<[**models::DropTokensMode**](DropTokensMode.md)> | | [optional] +**typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 | [optional] +**enable_typos_for_alpha_numerical_tokens** | Option<**bool**> | Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. | [optional] +**filter_curated_hits** | Option<**bool**> | Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false | [optional] +**enable_synonyms** | Option<**bool**> | If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. 
Default: true | [optional] +**synonym_prefix** | Option<**bool**> | Allow synonym resolution on word prefixes in the query. Default: false | [optional] +**synonym_num_typos** | Option<**i32**> | Allow synonym resolution on typo-corrected words in the query. Default: 0 | [optional] **pinned_hits** | Option<**String**> | A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] +**hidden_hits** | Option<**String**> | A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. | [optional] +**override_tags** | Option<**String**> | Comma separated list of tags to trigger the curation rules that match the tags. | [optional] +**highlight_fields** | Option<**String**> | A list of custom fields that must be highlighted even if you don't query for them | [optional] +**split_join_tokens** | Option<**String**> | Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always`. To disable, set value to `off`. Default is `fallback`. | [optional] **pre_segmented_query** | Option<**bool**> | You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same | [optional] -**prefix** | Option<**String**> | Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. | [optional] **preset** | Option<**String**> | Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. | [optional] +**enable_overrides** | Option<**bool**> | If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false | [optional][default to false] **prioritize_exact_match** | Option<**bool**> | Set this parameter to true to ensure that an exact match is ranked above the others | [optional][default to true] +**max_candidates** | Option<**i32**> | Control the number of words that Typesense considers for typo and prefix searching. | [optional] **prioritize_token_position** | Option<**bool**> | Make Typesense prioritize documents where the query words appear earlier in the text. | [optional][default to false] -**q** | Option<**String**> | The query text to search for in the collection. Use * as the search string to return all documents. 
This is typically useful when used in conjunction with filter_by. | [optional] -**query_by** | Option<**String**> | A list of `string` fields that should be queried against. Multiple fields are separated with a comma. | [optional] -**query_by_weights** | Option<**String**> | The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. | [optional] -**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] -**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] +**prioritize_num_matching_fields** | Option<**bool**> | Make Typesense prioritize documents where the query words appear in more number of fields. | [optional][default to true] +**enable_typos_for_numerical_tokens** | Option<**bool**> | Make Typesense disable typos for numerical tokens. | [optional][default to true] +**exhaustive_search** | Option<**bool**> | Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). | [optional] **search_cutoff_ms** | Option<**i32**> | Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. | [optional] -**snippet_threshold** | Option<**i32**> | Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 | [optional] -**sort_by** | Option<**String**> | A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` | [optional] -**split_join_tokens** | Option<**String**> | Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. | [optional] -**stopwords** | Option<**String**> | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. | [optional] -**synonym_num_typos** | Option<**i32**> | Allow synonym resolution on typo-corrected words in the query. Default: 0 | [optional] -**synonym_prefix** | Option<**bool**> | Allow synonym resolution on word prefixes in the query. Default: false | [optional] -**text_match_type** | Option<**String**> | In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. | [optional] -**typo_tokens_threshold** | Option<**i32**> | If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 | [optional] **use_cache** | Option<**bool**> | Enable server side caching of search query results. By default, caching is disabled. 
| [optional] +**cache_ttl** | Option<**i32**> | The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. | [optional] +**min_len_1typo** | Option<**i32**> | Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] +**min_len_2typo** | Option<**i32**> | Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. | [optional] **vector_query** | Option<**String**> | Vector query expression for fetching documents \"closest\" to a given query/document vector. | [optional] +**remote_embedding_timeout_ms** | Option<**i32**> | Timeout (in milliseconds) for fetching remote embeddings. | [optional] +**remote_embedding_num_tries** | Option<**i32**> | Number of times to retry fetching remote embeddings. | [optional] +**facet_strategy** | Option<**String**> | Choose the underlying faceting strategy used. Comma separated string of allowed values: exhaustive, top_values or automatic (default). | [optional] +**stopwords** | Option<**String**> | Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. | [optional] +**facet_return_parent** | Option<**String**> | Comma separated string of nested facet fields whose parent object should be returned in facet response. | [optional] **voice_query** | Option<**String**> | The base64 encoded audio file in 16 khz 16-bit WAV format. | [optional] +**conversation** | Option<**bool**> | Enable conversational search. | [optional] +**conversation_model_id** | Option<**String**> | The Id of the Conversation Model to be used. | [optional] +**conversation_id** | Option<**String**> | The Id of a previous conversation to continue; this tells Typesense to include prior context when communicating with the LLM.
| [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchResult.md b/typesense_codegen/docs/SearchResult.md index 4c5aa58..2739d28 100644 --- a/typesense_codegen/docs/SearchResult.md +++ b/typesense_codegen/docs/SearchResult.md @@ -4,17 +4,17 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**conversation** | Option<[**models::SearchResultConversation**](SearchResultConversation.md)> | | [optional] **facet_counts** | Option<[**Vec**](FacetCounts.md)> | | [optional] **found** | Option<**i32**> | The number of documents found | [optional] **found_docs** | Option<**i32**> | | [optional] -**grouped_hits** | Option<[**Vec**](SearchGroupedHit.md)> | | [optional] -**hits** | Option<[**Vec**](SearchResultHit.md)> | The documents that matched the search query | [optional] +**search_time_ms** | Option<**i32**> | The number of milliseconds the search took | [optional] **out_of** | Option<**i32**> | The total number of documents in the collection | [optional] +**search_cutoff** | Option<**bool**> | Whether the search was cut off | [optional] **page** | Option<**i32**> | The search result page number | [optional] +**grouped_hits** | Option<[**Vec**](SearchGroupedHit.md)> | | [optional] +**hits** | Option<[**Vec**](SearchResultHit.md)> | The documents that matched the search query | [optional] **request_params** | Option<[**models::SearchResultRequestParams**](SearchResult_request_params.md)> | | [optional] -**search_cutoff** | Option<**bool**> | Whether the search was cut off | [optional] -**search_time_ms** | Option<**i32**> | The number of milliseconds the search took | [optional] +**conversation** | Option<[**models::SearchResultConversation**](SearchResultConversation.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchResultHit.md b/typesense_codegen/docs/SearchResultHit.md index 9af8364..df343b1 100644 --- a/typesense_codegen/docs/SearchResultHit.md +++ b/typesense_codegen/docs/SearchResultHit.md @@ -4,12 +4,12 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**document** | Option<[**serde_json::Value**](.md)> | Can be any key-value pair | [optional] -**geo_distance_meters** | Option<**std::collections::HashMap**> | Can be any key-value pair | [optional] -**highlight** | Option<[**std::collections::HashMap**](serde_json::Value.md)> | Highlighted version of the matching document | [optional] **highlights** | Option<[**Vec**](SearchHighlight.md)> | (Deprecated) Contains highlighted portions of the search fields | [optional] +**highlight** | Option<[**std::collections::HashMap**](serde_json::Value.md)> | Highlighted version of the matching document | [optional] +**document** | Option<[**serde_json::Value**](.md)> | Can be any key-value pair | [optional] **text_match** | Option<**i64**> | | [optional] **text_match_info** | Option<[**models::SearchResultHitTextMatchInfo**](SearchResultHit_text_match_info.md)> | | [optional] +**geo_distance_meters** | Option<**std::collections::HashMap**> | Can be any key-value pair | [optional] **vector_distance** | Option<**f32**> | Distance between the query vector and matching document's vector value | [optional] [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchResultRequestParams.md b/typesense_codegen/docs/SearchResultRequestParams.md index 299f95e..9eeb27e 100644 --- a/typesense_codegen/docs/SearchResultRequestParams.md +++ b/typesense_codegen/docs/SearchResultRequestParams.md @@ -5,8 +5,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **collection_name** | **String** | | -**per_page** | **i32** | | **q** | **String** | | +**per_page** | **i32** | | **voice_query** | Option<[**models::SearchResultRequestParamsVoiceQuery**](SearchResult_request_params_voice_query.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchSynonym.md b/typesense_codegen/docs/SearchSynonym.md index 8d2ef72..70c9964 100644 --- a/typesense_codegen/docs/SearchSynonym.md +++ b/typesense_codegen/docs/SearchSynonym.md @@ -4,10 +4,10 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**locale** | Option<**String**> | Locale for the synonym, leave blank to use the standard tokenizer. | [optional] **root** | Option<**String**> | For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. | [optional] -**symbols_to_index** | Option<**Vec**> | By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. | [optional] **synonyms** | **Vec** | Array of words that should be considered as synonyms. | +**locale** | Option<**String**> | Locale for the synonym, leave blank to use the standard tokenizer. | [optional] +**symbols_to_index** | Option<**Vec**> | By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. | [optional] **id** | **String** | | [readonly] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/SearchSynonymSchema.md b/typesense_codegen/docs/SearchSynonymSchema.md index 6f305d1..bac8165 100644 --- a/typesense_codegen/docs/SearchSynonymSchema.md +++ b/typesense_codegen/docs/SearchSynonymSchema.md @@ -4,10 +4,10 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**locale** | Option<**String**> | Locale for the synonym, leave blank to use the standard tokenizer. | [optional] **root** | Option<**String**> | For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. | [optional] -**symbols_to_index** | Option<**Vec**> | By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. | [optional] **synonyms** | **Vec** | Array of words that should be considered as synonyms. | +**locale** | Option<**String**> | Locale for the synonym, leave blank to use the standard tokenizer. | [optional] +**symbols_to_index** | Option<**Vec**> | By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. 
| [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/StemmingDictionaryWordsInner.md b/typesense_codegen/docs/StemmingDictionaryWordsInner.md index f9a957d..7d9a515 100644 --- a/typesense_codegen/docs/StemmingDictionaryWordsInner.md +++ b/typesense_codegen/docs/StemmingDictionaryWordsInner.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**root** | **String** | The root form of the word | **word** | **String** | The word form to be stemmed | +**root** | **String** | The root form of the word | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/StopwordsSetSchema.md b/typesense_codegen/docs/StopwordsSetSchema.md index 637565b..97d49dc 100644 --- a/typesense_codegen/docs/StopwordsSetSchema.md +++ b/typesense_codegen/docs/StopwordsSetSchema.md @@ -5,8 +5,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **id** | **String** | | -**locale** | Option<**String**> | | [optional] **stopwords** | **Vec** | | +**locale** | Option<**String**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/StopwordsSetUpsertSchema.md b/typesense_codegen/docs/StopwordsSetUpsertSchema.md index da91c46..29082d9 100644 --- a/typesense_codegen/docs/StopwordsSetUpsertSchema.md +++ b/typesense_codegen/docs/StopwordsSetUpsertSchema.md @@ -4,8 +4,8 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**locale** | Option<**String**> | | [optional] **stopwords** | **Vec** | | +**locale** | Option<**String**> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/typesense_codegen/docs/UpdateDocumentsParameters.md b/typesense_codegen/docs/UpdateDocumentsParameters.md new file mode 100644 index 0000000..31eef40 --- /dev/null +++ b/typesense_codegen/docs/UpdateDocumentsParameters.md @@ -0,0 +1,11 @@ +# UpdateDocumentsParameters + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**filter_by** | Option<**String**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/typesense_codegen/docs/UpdateDocumentsUpdateDocumentsParametersParameter.md b/typesense_codegen/docs/UpdateDocumentsUpdateDocumentsParametersParameter.md new file mode 100644 index 0000000..0ba0554 --- /dev/null +++ b/typesense_codegen/docs/UpdateDocumentsUpdateDocumentsParametersParameter.md @@ -0,0 +1,11 @@ +# UpdateDocumentsUpdateDocumentsParametersParameter + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**filter_by** | Option<**String**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git 
a/typesense_codegen/src/apis/analytics_api.rs b/typesense_codegen/src/apis/analytics_api.rs index 827f420..ad3b431 100644 --- a/typesense_codegen/src/apis/analytics_api.rs +++ b/typesense_codegen/src/apis/analytics_api.rs @@ -14,6 +14,43 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`create_analytics_event`] +#[derive(Clone, Debug)] +pub struct CreateAnalyticsEventParams { + /// The Analytics event to be created + pub analytics_event_create_schema: models::AnalyticsEventCreateSchema +} + +/// struct for passing parameters to the method [`create_analytics_rule`] +#[derive(Clone, Debug)] +pub struct CreateAnalyticsRuleParams { + /// The Analytics rule to be created + pub analytics_rule_schema: models::AnalyticsRuleSchema +} + +/// struct for passing parameters to the method [`delete_analytics_rule`] +#[derive(Clone, Debug)] +pub struct DeleteAnalyticsRuleParams { + /// The name of the analytics rule to delete + pub rule_name: String +} + +/// struct for passing parameters to the method [`retrieve_analytics_rule`] +#[derive(Clone, Debug)] +pub struct RetrieveAnalyticsRuleParams { + /// The name of the analytics rule to retrieve + pub rule_name: String +} + +/// struct for passing parameters to the method [`upsert_analytics_rule`] +#[derive(Clone, Debug)] +pub struct UpsertAnalyticsRuleParams { + /// The name of the analytics rule to upsert + pub rule_name: String, + /// The Analytics rule to be upserted + pub analytics_rule_upsert_schema: models::AnalyticsRuleUpsertSchema +} + /// struct for typed errors of method [`create_analytics_event`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -64,9 +101,7 @@ pub enum UpsertAnalyticsRuleError { /// Sending events for analytics e.g rank search results based on popularity. -pub async fn create_analytics_event(configuration: &configuration::Configuration, analytics_event_create_schema: models::AnalyticsEventCreateSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_analytics_event_create_schema = analytics_event_create_schema; +pub async fn create_analytics_event(configuration: &configuration::Configuration, params: CreateAnalyticsEventParams) -> Result> { let uri_str = format!("{}/analytics/events", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); @@ -82,7 +117,7 @@ pub async fn create_analytics_event(configuration: &configuration::Configuration }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_analytics_event_create_schema); + req_builder = req_builder.json(&params.analytics_event_create_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -110,9 +145,7 @@ pub async fn create_analytics_event(configuration: &configuration::Configuration } /// When an analytics rule is created, we give it a name and describe the type, the source collections and the destination collection.
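For reference, a minimal usage sketch (not part of the generated diff) of the parameter-struct calling convention introduced above; the base URL and rule name are assumptions, and other `Configuration` fields are left at their defaults:

```rust
use typesense_codegen::apis::{analytics_api, configuration::Configuration};

async fn fetch_rule() {
    // Assumed local Typesense server; adjust base_path and API key as needed.
    let config = Configuration {
        base_path: "http://localhost:8108".to_owned(),
        ..Configuration::default()
    };
    // Parameters are now bundled in a per-endpoint struct instead of positional arguments.
    let params = analytics_api::RetrieveAnalyticsRuleParams {
        rule_name: "popular-queries".to_owned(), // hypothetical rule name
    };
    match analytics_api::retrieve_analytics_rule(&config, params).await {
        Ok(_rule) => println!("analytics rule retrieved"),
        Err(err) => eprintln!("request failed: {err:?}"),
    }
}
```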
-pub async fn create_analytics_rule(configuration: &configuration::Configuration, analytics_rule_schema: models::AnalyticsRuleSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_analytics_rule_schema = analytics_rule_schema; +pub async fn create_analytics_rule(configuration: &configuration::Configuration, params: CreateAnalyticsRuleParams) -> Result> { let uri_str = format!("{}/analytics/rules", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); @@ -128,7 +161,7 @@ pub async fn create_analytics_rule(configuration: &configuration::Configuration, }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_analytics_rule_schema); + req_builder = req_builder.json(&params.analytics_rule_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -156,11 +189,9 @@ pub async fn create_analytics_rule(configuration: &configuration::Configuration, } /// Permanently deletes an analytics rule, given its name -pub async fn delete_analytics_rule(configuration: &configuration::Configuration, rule_name: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_rule_name = rule_name; +pub async fn delete_analytics_rule(configuration: &configuration::Configuration, params: DeleteAnalyticsRuleParams) -> Result> { - let uri_str = format!("{}/analytics/rules/{ruleName}", configuration.base_path, ruleName=crate::apis::urlencode(p_rule_name)); + let uri_str = format!("{}/analytics/rules/{ruleName}", configuration.base_path, ruleName=crate::apis::urlencode(params.rule_name)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -201,11 +232,9 @@ pub async fn delete_analytics_rule(configuration: &configuration::Configuration, } /// Retrieve the details of an analytics rule, given its name -pub async fn retrieve_analytics_rule(configuration: &configuration::Configuration, rule_name: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_rule_name = rule_name; +pub async fn retrieve_analytics_rule(configuration: &configuration::Configuration, params: RetrieveAnalyticsRuleParams) -> Result> { - let uri_str = format!("{}/analytics/rules/{ruleName}", configuration.base_path, ruleName=crate::apis::urlencode(p_rule_name)); + let uri_str = format!("{}/analytics/rules/{ruleName}", configuration.base_path, ruleName=crate::apis::urlencode(params.rule_name)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -246,7 +275,7 @@ pub async fn retrieve_analytics_rule(configuration: &configuration::Configuratio } /// Retrieve the details of all analytics rules -pub async fn retrieve_analytics_rules(configuration: &configuration::Configuration, ) -> Result> { +pub async fn retrieve_analytics_rules(configuration: &configuration::Configuration) -> Result> { let uri_str = format!("{}/analytics/rules", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -289,12 +318,9 @@ pub async fn retrieve_analytics_rules(configuration: &configuration::Configurati } /// Upserts an analytics rule with the given name.
-pub async fn upsert_analytics_rule(configuration: &configuration::Configuration, rule_name: &str, analytics_rule_upsert_schema: models::AnalyticsRuleUpsertSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_rule_name = rule_name; - let p_analytics_rule_upsert_schema = analytics_rule_upsert_schema; +pub async fn upsert_analytics_rule(configuration: &configuration::Configuration, params: UpsertAnalyticsRuleParams) -> Result> { - let uri_str = format!("{}/analytics/rules/{ruleName}", configuration.base_path, ruleName=crate::apis::urlencode(p_rule_name)); + let uri_str = format!("{}/analytics/rules/{ruleName}", configuration.base_path, ruleName=crate::apis::urlencode(params.rule_name)); let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -308,7 +334,7 @@ pub async fn upsert_analytics_rule(configuration: &configuration::Configuration, }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_analytics_rule_upsert_schema); + req_builder = req_builder.json(&params.analytics_rule_upsert_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; diff --git a/typesense_codegen/src/apis/collections_api.rs b/typesense_codegen/src/apis/collections_api.rs index 0c3f7f3..9f24d9c 100644 --- a/typesense_codegen/src/apis/collections_api.rs +++ b/typesense_codegen/src/apis/collections_api.rs @@ -14,6 +14,59 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`create_collection`] +#[derive(Clone, Debug)] +pub struct CreateCollectionParams { + /// The collection object to be created + pub collection_schema: models::CollectionSchema +} + +/// struct for passing parameters to the method [`delete_alias`] +#[derive(Clone, Debug)] +pub struct DeleteAliasParams { + /// The name of the alias to delete + pub alias_name: String +} + +/// struct for passing parameters to the method [`delete_collection`] +#[derive(Clone, Debug)] +pub struct DeleteCollectionParams { + /// The name of the collection to delete + pub collection_name: String +} + +/// struct for passing parameters to the method [`get_alias`] +#[derive(Clone, Debug)] +pub struct GetAliasParams { + /// The name of the alias to retrieve + pub alias_name: String +} + +/// struct for passing parameters to the method [`get_collection`] +#[derive(Clone, Debug)] +pub struct GetCollectionParams { + /// The name of the collection to retrieve + pub collection_name: String +} + +/// struct for passing parameters to the method [`update_collection`] +#[derive(Clone, Debug)] +pub struct UpdateCollectionParams { + /// The name of the collection to update + pub collection_name: String, + /// The document object with fields to be updated + pub collection_update_schema: models::CollectionUpdateSchema +} + +/// struct for passing parameters to the method [`upsert_alias`] +#[derive(Clone, Debug)] +pub struct UpsertAliasParams { + /// The name of the alias to create/update + pub alias_name: String, + /// Collection alias to be created/updated + pub collection_alias_schema: Option +} + /// struct for typed errors of method [`create_collection`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -90,9 +143,7 @@ pub enum UpsertAliasError { /// When a collection is created, we give it a name and describe the fields that will
be indexed from the documents added to the collection. -pub async fn create_collection(configuration: &configuration::Configuration, collection_schema: models::CollectionSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_schema = collection_schema; +pub async fn create_collection(configuration: &configuration::Configuration, params: CreateCollectionParams) -> Result> { let uri_str = format!("{}/collections", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); @@ -108,7 +159,7 @@ pub async fn create_collection(configuration: &configuration::Configuration, col }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_collection_schema); + req_builder = req_builder.json(&params.collection_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -135,11 +186,9 @@ pub async fn create_collection(configuration: &configuration::Configuration, col } } -pub async fn delete_alias(configuration: &configuration::Configuration, alias_name: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_alias_name = alias_name; +pub async fn delete_alias(configuration: &configuration::Configuration, params: DeleteAliasParams) -> Result> { - let uri_str = format!("{}/aliases/{aliasName}", configuration.base_path, aliasName=crate::apis::urlencode(p_alias_name)); + let uri_str = format!("{}/aliases/{aliasName}", configuration.base_path, aliasName=crate::apis::urlencode(params.alias_name)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -180,11 +229,9 @@ pub async fn delete_alias(configuration: &configuration::Configuration, alias_na } /// Permanently drops a collection. This action cannot be undone. For large collections, this might have an impact on read latencies.
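Likewise, a hedged sketch (not from the diff) of dropping a collection through the new `DeleteCollectionParams` struct; the collection name is a placeholder:

```rust
use typesense_codegen::apis::{collections_api, configuration::Configuration};

async fn drop_collection(config: &Configuration) {
    // Irreversible: permanently deletes the collection and all of its documents.
    let params = collections_api::DeleteCollectionParams {
        collection_name: "books".to_owned(), // hypothetical collection
    };
    if let Err(err) = collections_api::delete_collection(config, params).await {
        eprintln!("delete failed: {err:?}");
    }
}
```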
-pub async fn delete_collection(configuration: &configuration::Configuration, collection_name: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; +pub async fn delete_collection(configuration: &configuration::Configuration, params: DeleteCollectionParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let uri_str = format!("{}/collections/{collectionName}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -225,11 +272,9 @@ pub async fn delete_collection(configuration: &configuration::Configuration, col } /// Find out which collection an alias points to by fetching it -pub async fn get_alias(configuration: &configuration::Configuration, alias_name: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_alias_name = alias_name; +pub async fn get_alias(configuration: &configuration::Configuration, params: GetAliasParams) -> Result> { - let uri_str = format!("{}/aliases/{aliasName}", configuration.base_path, aliasName=crate::apis::urlencode(p_alias_name)); + let uri_str = format!("{}/aliases/{aliasName}", configuration.base_path, aliasName=crate::apis::urlencode(params.alias_name)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -270,7 +315,7 @@ pub async fn get_alias(configuration: &configuration::Configuration, alias_name: } /// List all aliases and the corresponding collections that they map to. -pub async fn get_aliases(configuration: &configuration::Configuration, ) -> Result> { +pub async fn get_aliases(configuration: &configuration::Configuration) -> Result> { let uri_str = format!("{}/aliases", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -313,11 +358,9 @@ pub async fn get_aliases(configuration: &configuration::Configuration, ) -> Resu } /// Retrieve the details of a collection, given its name. -pub async fn get_collection(configuration: &configuration::Configuration, collection_name: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; +pub async fn get_collection(configuration: &configuration::Configuration, params: GetCollectionParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let uri_str = format!("{}/collections/{collectionName}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -358,7 +401,7 @@ pub async fn get_collection(configuration: &configuration::Configuration, collec } /// Returns a summary of all your collections. The collections are returned sorted by creation date, with the most recent collections appearing first. 
-pub async fn get_collections(configuration: &configuration::Configuration, ) -> Result, Error> { +pub async fn get_collections(configuration: &configuration::Configuration) -> Result, Error> { let uri_str = format!("{}/collections", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -401,12 +444,9 @@ pub async fn get_collections(configuration: &configuration::Configuration, ) -> } /// Update a collection's schema to modify the fields and their types. -pub async fn update_collection(configuration: &configuration::Configuration, collection_name: &str, collection_update_schema: models::CollectionUpdateSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_collection_update_schema = collection_update_schema; +pub async fn update_collection(configuration: &configuration::Configuration, params: UpdateCollectionParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let uri_str = format!("{}/collections/{collectionName}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::PATCH, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -420,7 +460,7 @@ pub async fn update_collection(configuration: &configuration::Configuration, col }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_collection_update_schema); + req_builder = req_builder.json(&params.collection_update_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -448,12 +488,9 @@ pub async fn update_collection(configuration: &configuration::Configuration, col } /// Create or update a collection alias. An alias is a virtual collection name that points to a real collection. If you're familiar with symbolic links on Linux, it's very similar to that. Aliases are useful when you want to reindex your data in the background on a new collection and switch your application to it without any changes to your code.
-pub async fn upsert_alias(configuration: &configuration::Configuration, alias_name: &str, collection_alias_schema: Option) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_alias_name = alias_name; - let p_collection_alias_schema = collection_alias_schema; +pub async fn upsert_alias(configuration: &configuration::Configuration, params: UpsertAliasParams) -> Result> { - let uri_str = format!("{}/aliases/{aliasName}", configuration.base_path, aliasName=crate::apis::urlencode(p_alias_name)); + let uri_str = format!("{}/aliases/{aliasName}", configuration.base_path, aliasName=crate::apis::urlencode(params.alias_name)); let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -467,7 +504,7 @@ pub async fn upsert_alias(configuration: &configuration::Configuration, alias_na }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_collection_alias_schema); + req_builder = req_builder.json(&params.collection_alias_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; diff --git a/typesense_codegen/src/apis/configuration.rs b/typesense_codegen/src/apis/configuration.rs index 1a85d14..8d3c1c1 100644 --- a/typesense_codegen/src/apis/configuration.rs +++ b/typesense_codegen/src/apis/configuration.rs @@ -14,7 +14,7 @@ pub struct Configuration { pub base_path: String, pub user_agent: Option, - pub client: reqwest::Client, + pub client: reqwest_middleware::ClientWithMiddleware, pub basic_auth: Option, pub oauth_access_token: Option, pub bearer_access_token: Option, @@ -41,7 +41,7 @@ impl Default for Configuration { Configuration { base_path: "http://localhost".to_owned(), user_agent: Some("OpenAPI-Generator/28.0/rust".to_owned()), - client: reqwest::Client::new(), + client: reqwest_middleware::ClientBuilder::new(reqwest::Client::new()).build(), basic_auth: None, oauth_access_token: None, bearer_access_token: None, diff --git a/typesense_codegen/src/apis/conversations_api.rs b/typesense_codegen/src/apis/conversations_api.rs index b9f8c0a..951d1c3 100644 --- a/typesense_codegen/src/apis/conversations_api.rs +++ b/typesense_codegen/src/apis/conversations_api.rs @@ -14,6 +14,34 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`create_conversation_model`] +#[derive(Clone, Debug)] +pub struct CreateConversationModelParams { + pub conversation_model_create_schema: models::ConversationModelCreateSchema +} + +/// struct for passing parameters to the method [`delete_conversation_model`] +#[derive(Clone, Debug)] +pub struct DeleteConversationModelParams { + /// The id of the conversation model to delete + pub model_id: String +} + +/// struct for passing parameters to the method [`retrieve_conversation_model`] +#[derive(Clone, Debug)] +pub struct RetrieveConversationModelParams { + /// The id of the conversation model to retrieve + pub model_id: String +} + +/// struct for passing parameters to the method [`update_conversation_model`] +#[derive(Clone, Debug)] +pub struct UpdateConversationModelParams { + /// The id of the conversation model to update + pub model_id: String, + pub conversation_model_update_schema: models::ConversationModelUpdateSchema +} + /// struct for typed errors of method [`create_conversation_model`] #[derive(Debug, Clone, Serialize,
Deserialize)] @@ -53,9 +81,7 @@ pub enum UpdateConversationModelError { /// Create a Conversation Model -pub async fn create_conversation_model(configuration: &configuration::Configuration, conversation_model_create_schema: models::ConversationModelCreateSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_conversation_model_create_schema = conversation_model_create_schema; +pub async fn create_conversation_model(configuration: &configuration::Configuration, params: CreateConversationModelParams) -> Result> { let uri_str = format!("{}/conversations/models", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); @@ -71,7 +97,7 @@ pub async fn create_conversation_model(configuration: &configuration::Configurat }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_conversation_model_create_schema); + req_builder = req_builder.json(&params.conversation_model_create_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -99,11 +125,9 @@ pub async fn create_conversation_model(configuration: &configuration::Configurat } /// Delete a conversation model -pub async fn delete_conversation_model(configuration: &configuration::Configuration, model_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_model_id = model_id; +pub async fn delete_conversation_model(configuration: &configuration::Configuration, params: DeleteConversationModelParams) -> Result> { - let uri_str = format!("{}/conversations/models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(p_model_id)); + let uri_str = format!("{}/conversations/models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(params.model_id)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -144,7 +168,7 @@ pub async fn delete_conversation_model(configuration: &configuration::Configurat } /// Retrieve all conversation models -pub async fn retrieve_all_conversation_models(configuration: &configuration::Configuration, ) -> Result, Error> { +pub async fn retrieve_all_conversation_models(configuration: &configuration::Configuration) -> Result, Error> { let uri_str = format!("{}/conversations/models", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -187,11 +211,9 @@ pub async fn retrieve_all_conversation_models(configuration: &configuration::Con } /// Retrieve a conversation model -pub async fn retrieve_conversation_model(configuration: &configuration::Configuration, model_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_model_id = model_id; +pub async fn retrieve_conversation_model(configuration: &configuration::Configuration, params: RetrieveConversationModelParams) -> Result> { - let uri_str = format!("{}/conversations/models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(p_model_id)); + let uri_str = format!("{}/conversations/models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(params.model_id)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -232,12 +254,9 @@ pub async fn retrieve_conversation_model(configuration: &configuration::Configur }
/// Update a conversation model -pub async fn update_conversation_model(configuration: &configuration::Configuration, model_id: &str, conversation_model_update_schema: models::ConversationModelUpdateSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_model_id = model_id; - let p_conversation_model_update_schema = conversation_model_update_schema; +pub async fn update_conversation_model(configuration: &configuration::Configuration, params: UpdateConversationModelParams) -> Result> { - let uri_str = format!("{}/conversations/models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(p_model_id)); + let uri_str = format!("{}/conversations/models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(params.model_id)); let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -251,7 +270,7 @@ pub async fn update_conversation_model(configuration: &configuration::Configurat }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_conversation_model_update_schema); + req_builder = req_builder.json(&params.conversation_model_update_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; diff --git a/typesense_codegen/src/apis/curation_api.rs b/typesense_codegen/src/apis/curation_api.rs index dbcce80..52db601 100644 --- a/typesense_codegen/src/apis/curation_api.rs +++ b/typesense_codegen/src/apis/curation_api.rs @@ -14,6 +14,33 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`delete_search_override`] +#[derive(Clone, Debug)] +pub struct DeleteSearchOverrideParams { + /// The name of the collection + pub collection_name: String, + /// The ID of the search override to delete + pub override_id: String +} + +/// struct for passing parameters to the method [`get_search_overrides`] +#[derive(Clone, Debug)] +pub struct GetSearchOverridesParams { + /// The name of the collection + pub collection_name: String +} + +/// struct for passing parameters to the method [`upsert_search_override`] +#[derive(Clone, Debug)] +pub struct UpsertSearchOverrideParams { + /// The name of the collection + pub collection_name: String, + /// The ID of the search override to create/update + pub override_id: String, + /// The search override object to be created/updated + pub search_override_schema: models::SearchOverrideSchema +} + /// struct for typed errors of method [`delete_search_override`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -39,12 +66,9 @@ pub enum UpsertSearchOverrideError { } -pub async fn delete_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_override_id = override_id; +pub async fn delete_search_override(configuration: &configuration::Configuration, params: DeleteSearchOverrideParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path,
collectionName=crate::apis::urlencode(params.collection_name), overrideId=crate::apis::urlencode(params.override_id)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -84,11 +108,9 @@ pub async fn delete_search_override(configuration: &configuration::Configuration } } -pub async fn get_search_overrides(configuration: &configuration::Configuration, collection_name: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; +pub async fn get_search_overrides(configuration: &configuration::Configuration, params: GetSearchOverridesParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/overrides", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let uri_str = format!("{}/collections/{collectionName}/overrides", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -129,13 +151,9 @@ pub async fn get_search_overrides(configuration: &configuration::Configuration, } /// Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. -pub async fn upsert_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str, search_override_schema: models::SearchOverrideSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_override_id = override_id; - let p_search_override_schema = search_override_schema; +pub async fn upsert_search_override(configuration: &configuration::Configuration, params: UpsertSearchOverrideParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), overrideId=crate::apis::urlencode(params.override_id)); let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -149,7 +167,7 @@ pub async fn upsert_search_override(configuration: &configuration::Configuration }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_search_override_schema); + req_builder = req_builder.json(&params.search_override_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; diff --git a/typesense_codegen/src/apis/debug_api.rs b/typesense_codegen/src/apis/debug_api.rs index 3e53f97..af71894 100644 --- a/typesense_codegen/src/apis/debug_api.rs +++ b/typesense_codegen/src/apis/debug_api.rs @@ -24,7 +24,7 @@ pub enum DebugError { /// Print debugging information -pub async fn debug(configuration: &configuration::Configuration, ) -> Result> { +pub async fn debug(configuration: &configuration::Configuration) -> Result> { let uri_str = format!("{}/debug", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); diff --git
a/typesense_codegen/src/apis/documents_api.rs b/typesense_codegen/src/apis/documents_api.rs index c228ba1..b6ebf75 100644 --- a/typesense_codegen/src/apis/documents_api.rs +++ b/typesense_codegen/src/apis/documents_api.rs @@ -14,6 +14,277 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`delete_document`] +#[derive(Clone, Debug)] +pub struct DeleteDocumentParams { + /// The name of the collection to search for the document under + pub collection_name: String, + /// The Document ID + pub document_id: String +} + +/// struct for passing parameters to the method [`delete_documents`] +#[derive(Clone, Debug)] +pub struct DeleteDocumentsParams { + /// The name of the collection to delete documents from + pub collection_name: String, + pub filter_by: Option, + pub batch_size: Option, + pub ignore_not_found: Option, + pub truncate: Option +} + +/// struct for passing parameters to the method [`delete_search_override`] +#[derive(Clone, Debug)] +pub struct DeleteSearchOverrideParams { + /// The name of the collection + pub collection_name: String, + /// The ID of the search override to delete + pub override_id: String +} + +/// struct for passing parameters to the method [`export_documents`] +#[derive(Clone, Debug)] +pub struct ExportDocumentsParams { + /// The name of the collection + pub collection_name: String, + pub filter_by: Option, + pub include_fields: Option, + pub exclude_fields: Option +} + +/// struct for passing parameters to the method [`get_document`] +#[derive(Clone, Debug)] +pub struct GetDocumentParams { + /// The name of the collection to search for the document under + pub collection_name: String, + /// The Document ID + pub document_id: String +} + +/// struct for passing parameters to the method [`get_search_override`] +#[derive(Clone, Debug)] +pub struct GetSearchOverrideParams { + /// The name of the collection + pub collection_name: String, + /// The id of the search override + pub override_id: String +} + +/// struct for passing parameters to the method [`get_search_overrides`] +#[derive(Clone, Debug)] +pub struct GetSearchOverridesParams { + /// The name of the collection + pub collection_name: String +} + +/// struct for passing parameters to the method [`import_documents`] +#[derive(Clone, Debug)] +pub struct ImportDocumentsParams { + /// The name of the collection + pub collection_name: String, + /// The json array of documents or the JSONL file to import + pub body: String, + pub batch_size: Option, + pub return_id: Option, + pub remote_embedding_batch_size: Option, + pub return_doc: Option, + pub action: Option, + pub dirty_values: Option +} + +/// struct for passing parameters to the method [`index_document`] +#[derive(Clone, Debug)] +pub struct IndexDocumentParams { + /// The name of the collection to add the document to + pub collection_name: String, + /// The document object to be indexed + pub body: serde_json::Value, + /// Additional action to perform + pub action: Option, + /// Dealing with Dirty Data + pub dirty_values: Option +} + +/// struct for passing parameters to the method [`multi_search`] +#[derive(Clone, Debug)] +pub struct MultiSearchParams { + pub q: Option, + pub query_by: Option, + pub query_by_weights: Option, + pub text_match_type: Option, + pub prefix: Option, + pub infix: Option, + pub max_extra_prefix: Option, + pub max_extra_suffix: Option, + pub filter_by: Option, + pub sort_by: Option, + pub 
facet_by: Option, + pub max_facet_values: Option, + pub facet_query: Option, + pub num_typos: Option, + pub page: Option, + pub per_page: Option, + pub limit: Option, + pub offset: Option, + pub group_by: Option, + pub group_limit: Option, + pub group_missing_values: Option, + pub include_fields: Option, + pub exclude_fields: Option, + pub highlight_full_fields: Option, + pub highlight_affix_num_tokens: Option, + pub highlight_start_tag: Option, + pub highlight_end_tag: Option, + pub snippet_threshold: Option, + pub drop_tokens_threshold: Option, + pub drop_tokens_mode: Option, + pub typo_tokens_threshold: Option, + pub enable_typos_for_alpha_numerical_tokens: Option, + pub filter_curated_hits: Option, + pub enable_synonyms: Option, + pub synonym_prefix: Option, + pub synonym_num_typos: Option, + pub pinned_hits: Option, + pub hidden_hits: Option, + pub override_tags: Option, + pub highlight_fields: Option, + pub pre_segmented_query: Option, + pub preset: Option, + pub enable_overrides: Option, + pub prioritize_exact_match: Option, + pub prioritize_token_position: Option, + pub prioritize_num_matching_fields: Option, + pub enable_typos_for_numerical_tokens: Option, + pub exhaustive_search: Option, + pub search_cutoff_ms: Option, + pub use_cache: Option, + pub cache_ttl: Option, + pub min_len_1typo: Option, + pub min_len_2typo: Option, + pub vector_query: Option, + pub remote_embedding_timeout_ms: Option, + pub remote_embedding_num_tries: Option, + pub facet_strategy: Option, + pub stopwords: Option, + pub facet_return_parent: Option, + pub voice_query: Option, + pub conversation: Option, + pub conversation_model_id: Option, + pub conversation_id: Option, + pub multi_search_searches_parameter: Option +} + +/// struct for passing parameters to the method [`search_collection`] +#[derive(Clone, Debug)] +pub struct SearchCollectionParams { + /// The name of the collection to search for the document under + pub collection_name: String, + pub q: Option, + pub query_by: Option, + pub nl_query: Option, + pub nl_model_id: Option, + pub query_by_weights: Option, + pub text_match_type: Option, + pub prefix: Option, + pub infix: Option, + pub max_extra_prefix: Option, + pub max_extra_suffix: Option, + pub filter_by: Option, + pub max_filter_by_candidates: Option, + pub sort_by: Option, + pub facet_by: Option, + pub max_facet_values: Option, + pub facet_query: Option, + pub num_typos: Option, + pub page: Option, + pub per_page: Option, + pub limit: Option, + pub offset: Option, + pub group_by: Option, + pub group_limit: Option, + pub group_missing_values: Option, + pub include_fields: Option, + pub exclude_fields: Option, + pub highlight_full_fields: Option, + pub highlight_affix_num_tokens: Option, + pub highlight_start_tag: Option, + pub highlight_end_tag: Option, + pub enable_highlight_v1: Option, + pub snippet_threshold: Option, + pub drop_tokens_threshold: Option, + pub drop_tokens_mode: Option, + pub typo_tokens_threshold: Option, + pub enable_typos_for_alpha_numerical_tokens: Option, + pub filter_curated_hits: Option, + pub enable_synonyms: Option, + pub synonym_prefix: Option, + pub synonym_num_typos: Option, + pub pinned_hits: Option, + pub hidden_hits: Option, + pub override_tags: Option, + pub highlight_fields: Option, + pub split_join_tokens: Option, + pub pre_segmented_query: Option, + pub preset: Option, + pub enable_overrides: Option, + pub prioritize_exact_match: Option, + pub max_candidates: Option, + pub prioritize_token_position: Option, + pub prioritize_num_matching_fields: Option, + 
pub enable_typos_for_numerical_tokens: Option, + pub exhaustive_search: Option, + pub search_cutoff_ms: Option, + pub use_cache: Option, + pub cache_ttl: Option, + pub min_len_1typo: Option, + pub min_len_2typo: Option, + pub vector_query: Option, + pub remote_embedding_timeout_ms: Option, + pub remote_embedding_num_tries: Option, + pub facet_strategy: Option, + pub stopwords: Option, + pub facet_return_parent: Option, + pub voice_query: Option, + pub conversation: Option, + pub conversation_model_id: Option, + pub conversation_id: Option +} + +/// struct for passing parameters to the method [`update_document`] +#[derive(Clone, Debug)] +pub struct UpdateDocumentParams { + /// The name of the collection to search for the document under + pub collection_name: String, + /// The Document ID + pub document_id: String, + /// The document object with fields to be updated + pub body: serde_json::Value, + /// Dealing with Dirty Data + pub dirty_values: Option +} + +/// struct for passing parameters to the method [`update_documents`] +#[derive(Clone, Debug)] +pub struct UpdateDocumentsParams { + /// The name of the collection to update documents in + pub collection_name: String, + /// The document fields to be updated + pub body: serde_json::Value, + pub filter_by: Option +} + +/// struct for passing parameters to the method [`upsert_search_override`] +#[derive(Clone, Debug)] +pub struct UpsertSearchOverrideParams { + /// The name of the collection + pub collection_name: String, + /// The ID of the search override to create/update + pub override_id: String, + /// The search override object to be created/updated + pub search_override_schema: models::SearchOverrideSchema +} + /// struct for typed errors of method [`delete_document`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -130,12 +401,9 @@ pub enum UpsertSearchOverrideError { /// Delete an individual document from a collection by using its ID. -pub async fn delete_document(configuration: &configuration::Configuration, collection_name: &str, document_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_document_id = document_id; +pub async fn delete_document(configuration: &configuration::Configuration, params: DeleteDocumentParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), documentId=crate::apis::urlencode(p_document_id)); + let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), documentId=crate::apis::urlencode(params.document_id)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -176,27 +444,21 @@ pub async fn delete_document(configuration: &configuration::Configuration, colle } /// Delete a bunch of documents that match a specific filter condition. Use the `batch_size` parameter to control the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server.
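A minimal sketch (not part of the diff) of that call through the new `DeleteDocumentsParams` struct, assuming a `products` collection, a hypothetical `in_stock:false` filter, and the API's `num_deleted` response field:

```rust
use typesense_codegen::apis::{documents_api, configuration::Configuration};

async fn purge_out_of_stock(config: &Configuration) {
    // Optional query parameters are plain Option fields on the params struct.
    let params = documents_api::DeleteDocumentsParams {
        collection_name: "products".to_owned(),       // hypothetical collection
        filter_by: Some("in_stock:false".to_owned()), // hypothetical filter expression
        batch_size: Some(200),
        ignore_not_found: None,
        truncate: None,
    };
    match documents_api::delete_documents(config, params).await {
        Ok(resp) => println!("deleted {:?} documents", resp.num_deleted),
        Err(err) => eprintln!("delete failed: {err:?}"),
    }
}
```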
-pub async fn delete_documents(configuration: &configuration::Configuration, collection_name: &str, batch_size: Option, filter_by: Option<&str>, ignore_not_found: Option, truncate: Option) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_batch_size = batch_size; - let p_filter_by = filter_by; - let p_ignore_not_found = ignore_not_found; - let p_truncate = truncate; - - let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); +pub async fn delete_documents(configuration: &configuration::Configuration, params: DeleteDocumentsParams) -> Result> { + + let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); - if let Some(ref param_value) = p_batch_size { - req_builder = req_builder.query(&[("batch_size", &param_value.to_string())]); - } - if let Some(ref param_value) = p_filter_by { + if let Some(ref param_value) = params.filter_by { req_builder = req_builder.query(&[("filter_by", &param_value.to_string())]); } - if let Some(ref param_value) = p_ignore_not_found { + if let Some(ref param_value) = params.batch_size { + req_builder = req_builder.query(&[("batch_size", &param_value.to_string())]); + } + if let Some(ref param_value) = params.ignore_not_found { req_builder = req_builder.query(&[("ignore_not_found", &param_value.to_string())]); } - if let Some(ref param_value) = p_truncate { + if let Some(ref param_value) = params.truncate { req_builder = req_builder.query(&[("truncate", &param_value.to_string())]); } if let Some(ref user_agent) = configuration.user_agent { @@ -236,12 +498,9 @@ pub async fn delete_documents(configuration: &configuration::Configuration, coll } } -pub async fn delete_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_override_id = override_id; +pub async fn delete_search_override(configuration: &configuration::Configuration, params: DeleteSearchOverrideParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), overrideId=crate::apis::urlencode(params.override_id)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -282,25 +541,20 @@ pub async fn delete_search_override(configuration: &configuration::Configuration } /// Export all documents in a collection in JSON lines format.
-pub async fn export_documents(configuration: &configuration::Configuration, collection_name: &str, exclude_fields: Option<&str>, filter_by: Option<&str>, include_fields: Option<&str>) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_exclude_fields = exclude_fields; - let p_filter_by = filter_by; - let p_include_fields = include_fields; - - let uri_str = format!("{}/collections/{collectionName}/documents/export", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); +pub async fn export_documents(configuration: &configuration::Configuration, params: ExportDocumentsParams) -> Result> { + + let uri_str = format!("{}/collections/{collectionName}/documents/export", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); - if let Some(ref param_value) = p_exclude_fields { - req_builder = req_builder.query(&[("exclude_fields", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_filter_by { + if let Some(ref param_value) = params.filter_by { req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_include_fields { + if let Some(ref param_value) = params.include_fields { req_builder = req_builder.query(&[("include_fields", ¶m_value.to_string())]); } + if let Some(ref param_value) = params.exclude_fields { + req_builder = req_builder.query(&[("exclude_fields", ¶m_value.to_string())]); + } if let Some(ref user_agent) = configuration.user_agent { req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } @@ -339,12 +593,9 @@ pub async fn export_documents(configuration: &configuration::Configuration, coll } /// Fetch an individual document from a collection by using its ID. -pub async fn get_document(configuration: &configuration::Configuration, collection_name: &str, document_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_document_id = document_id; +pub async fn get_document(configuration: &configuration::Configuration, params: GetDocumentParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), documentId=crate::apis::urlencode(p_document_id)); + let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), documentId=crate::apis::urlencode(params.document_id)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -385,12 +636,9 @@ pub async fn get_document(configuration: &configuration::Configuration, collecti } /// Retrieve the details of a search override, given its id. 
-pub async fn get_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_override_id = override_id; +pub async fn get_search_override(configuration: &configuration::Configuration, params: GetSearchOverrideParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), overrideId=crate::apis::urlencode(params.override_id)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -430,11 +678,9 @@ pub async fn get_search_override(configuration: &configuration::Configuration, c } } -pub async fn get_search_overrides(configuration: &configuration::Configuration, collection_name: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; +pub async fn get_search_overrides(configuration: &configuration::Configuration, params: GetSearchOverridesParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/overrides", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let uri_str = format!("{}/collections/{collectionName}/overrides", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -475,37 +721,28 @@ pub async fn get_search_overrides(configuration: &configuration::Configuration, } /// The documents to be imported must be formatted in a newline delimited JSON structure. You can feed the output file from a Typesense export operation directly as import. 
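For the bulk-import endpoint described above, here is a hedged sketch. It assumes `ImportDocumentsParams` carries exactly the fields referenced in the refactored function body that follows, with `body` as a plain `String` of newline-delimited JSON and the `action`/`dirty_values` options left as `None`; collection and document contents are invented for illustration.

```rust
use serde_json::json;
use typesense_codegen::apis::{configuration, documents_api};

// Hypothetical example: bulk-import two documents as newline-delimited JSON.
async fn import_two(
    config: &configuration::Configuration,
) -> Result<(), Box<dyn std::error::Error>> {
    // Build the NDJSON payload: one JSON object per line.
    let body = [
        json!({"id": "1", "name": "sneaker", "in_stock": true}),
        json!({"id": "2", "name": "sandal", "in_stock": false}),
    ]
    .iter()
    .map(|d| d.to_string())
    .collect::<Vec<_>>()
    .join("\n");

    let params = documents_api::ImportDocumentsParams {
        collection_name: "products".to_owned(),
        body,                               // assumed String of JSONL
        action: None,                       // None keeps the server default
        batch_size: Some(40),
        dirty_values: None,
        remote_embedding_batch_size: None,
        return_doc: None,
        return_id: Some(true),
    };
    // The server replies with one {"success": ...} JSON object per imported line.
    let report = documents_api::import_documents(config, params).await?;
    println!("{:?}", report);
    Ok(())
}
```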
-pub async fn import_documents(configuration: &configuration::Configuration, collection_name: &str, body: &str, action: Option, batch_size: Option, dirty_values: Option, remote_embedding_batch_size: Option, return_doc: Option, return_id: Option) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_body = body; - let p_action = action; - let p_batch_size = batch_size; - let p_dirty_values = dirty_values; - let p_remote_embedding_batch_size = remote_embedding_batch_size; - let p_return_doc = return_doc; - let p_return_id = return_id; - - let uri_str = format!("{}/collections/{collectionName}/documents/import", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); +pub async fn import_documents(configuration: &configuration::Configuration, params: ImportDocumentsParams) -> Result> { + + let uri_str = format!("{}/collections/{collectionName}/documents/import", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); - if let Some(ref param_value) = p_action { - req_builder = req_builder.query(&[("action", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_batch_size { + if let Some(ref param_value) = params.batch_size { req_builder = req_builder.query(&[("batch_size", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_dirty_values { - req_builder = req_builder.query(&[("dirty_values", ¶m_value.to_string())]); + if let Some(ref param_value) = params.return_id { + req_builder = req_builder.query(&[("return_id", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_remote_embedding_batch_size { + if let Some(ref param_value) = params.remote_embedding_batch_size { req_builder = req_builder.query(&[("remote_embedding_batch_size", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_return_doc { + if let Some(ref param_value) = params.return_doc { req_builder = req_builder.query(&[("return_doc", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_return_id { - req_builder = req_builder.query(&[("return_id", ¶m_value.to_string())]); + if let Some(ref param_value) = params.action { + req_builder = req_builder.query(&[("action", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.dirty_values { + req_builder = req_builder.query(&[("dirty_values", ¶m_value.to_string())]); } if let Some(ref user_agent) = configuration.user_agent { req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); @@ -518,7 +755,7 @@ pub async fn import_documents(configuration: &configuration::Configuration, coll }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_body); + req_builder = req_builder.json(¶ms.body); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -546,20 +783,15 @@ pub async fn import_documents(configuration: &configuration::Configuration, coll } /// A document to be indexed in a given collection must conform to the schema of the collection. 
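For single-document indexing, the new `IndexDocumentParams` bundles the collection name, the document body, and the two optional knobs read by the refactored function below. A minimal sketch, with the document contents invented for illustration:

```rust
use serde_json::json;
use typesense_codegen::apis::{configuration, documents_api};

// Hypothetical example: index one document that matches the collection schema.
async fn add_one(
    config: &configuration::Configuration,
) -> Result<(), Box<dyn std::error::Error>> {
    let params = documents_api::IndexDocumentParams {
        collection_name: "products".to_owned(),
        body: json!({"id": "42", "name": "trail runner", "in_stock": true}),
        action: None,       // None keeps the default create behaviour
        dirty_values: None, // None keeps the server-side default handling
    };
    let stored = documents_api::index_document(config, params).await?;
    println!("indexed: {:?}", stored);
    Ok(())
}
```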
-pub async fn index_document(configuration: &configuration::Configuration, collection_name: &str, body: serde_json::Value, action: Option<&str>, dirty_values: Option) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_body = body; - let p_action = action; - let p_dirty_values = dirty_values; - - let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); +pub async fn index_document(configuration: &configuration::Configuration, params: IndexDocumentParams) -> Result> { + + let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); - if let Some(ref param_value) = p_action { + if let Some(ref param_value) = params.action { req_builder = req_builder.query(&[("action", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_dirty_values { + if let Some(ref param_value) = params.dirty_values { req_builder = req_builder.query(&[("dirty_values", ¶m_value.to_string())]); } if let Some(ref user_agent) = configuration.user_agent { @@ -573,7 +805,7 @@ pub async fn index_document(configuration: &configuration::Configuration, collec }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_body); + req_builder = req_builder.json(¶ms.body); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -601,281 +833,200 @@ pub async fn index_document(configuration: &configuration::Configuration, collec } /// This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. 
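Before the long `multi_search` hunk, a sketch of how the federated-search body could be assembled from the generated models. This assumes the model structs derive `Default` (so unused options can be elided) and that `MultiSearchCollectionParameters` exposes optional `collection`, `q`, and `query_by` fields as in its doc page earlier in this patch; if those assumptions do not hold, each field would need to be spelled out explicitly.

```rust
use typesense_codegen::models;

// Hypothetical sketch: assemble the body for a federated search over two collections.
fn build_federated_body() -> models::MultiSearchSearchesParameter {
    let products = models::MultiSearchCollectionParameters {
        collection: Some("products".to_owned()),
        q: Some("running shoes".to_owned()),
        query_by: Some("name,description".to_owned()),
        ..Default::default() // assumes the generated model derives Default
    };
    let brands = models::MultiSearchCollectionParameters {
        collection: Some("brands".to_owned()),
        q: Some("running shoes".to_owned()),
        query_by: Some("name".to_owned()),
        ..Default::default()
    };
    models::MultiSearchSearchesParameter {
        searches: vec![products, brands],
        ..Default::default()
    }
}
// The returned value would be passed as `multi_search_searches_parameter` inside
// `MultiSearchParams`, with the remaining query-level fields left as `None`.
```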
-pub async fn multi_search(configuration: &configuration::Configuration, cache_ttl: Option, conversation: Option, conversation_id: Option<&str>, conversation_model_id: Option<&str>, drop_tokens_mode: Option, drop_tokens_threshold: Option, enable_highlight_v1: Option, enable_overrides: Option, enable_synonyms: Option, enable_typos_for_alpha_numerical_tokens: Option, enable_typos_for_numerical_tokens: Option, exclude_fields: Option<&str>, exhaustive_search: Option, facet_by: Option<&str>, facet_query: Option<&str>, facet_return_parent: Option<&str>, facet_strategy: Option<&str>, filter_by: Option<&str>, filter_curated_hits: Option, group_by: Option<&str>, group_limit: Option, group_missing_values: Option, hidden_hits: Option<&str>, highlight_affix_num_tokens: Option, highlight_end_tag: Option<&str>, highlight_fields: Option<&str>, highlight_full_fields: Option<&str>, highlight_start_tag: Option<&str>, include_fields: Option<&str>, infix: Option<&str>, limit: Option, max_candidates: Option, max_extra_prefix: Option, max_extra_suffix: Option, max_facet_values: Option, max_filter_by_candidates: Option, min_len_1typo: Option, min_len_2typo: Option, num_typos: Option<&str>, offset: Option, override_tags: Option<&str>, page: Option, per_page: Option, pinned_hits: Option<&str>, pre_segmented_query: Option, prefix: Option<&str>, preset: Option<&str>, prioritize_exact_match: Option, prioritize_num_matching_fields: Option, prioritize_token_position: Option, q: Option<&str>, query_by: Option<&str>, query_by_weights: Option<&str>, remote_embedding_num_tries: Option, remote_embedding_timeout_ms: Option, search_cutoff_ms: Option, snippet_threshold: Option, sort_by: Option<&str>, split_join_tokens: Option<&str>, stopwords: Option<&str>, synonym_num_typos: Option, synonym_prefix: Option, text_match_type: Option<&str>, typo_tokens_threshold: Option, use_cache: Option, vector_query: Option<&str>, voice_query: Option<&str>, multi_search_searches_parameter: Option) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_cache_ttl = cache_ttl; - let p_conversation = conversation; - let p_conversation_id = conversation_id; - let p_conversation_model_id = conversation_model_id; - let p_drop_tokens_mode = drop_tokens_mode; - let p_drop_tokens_threshold = drop_tokens_threshold; - let p_enable_highlight_v1 = enable_highlight_v1; - let p_enable_overrides = enable_overrides; - let p_enable_synonyms = enable_synonyms; - let p_enable_typos_for_alpha_numerical_tokens = enable_typos_for_alpha_numerical_tokens; - let p_enable_typos_for_numerical_tokens = enable_typos_for_numerical_tokens; - let p_exclude_fields = exclude_fields; - let p_exhaustive_search = exhaustive_search; - let p_facet_by = facet_by; - let p_facet_query = facet_query; - let p_facet_return_parent = facet_return_parent; - let p_facet_strategy = facet_strategy; - let p_filter_by = filter_by; - let p_filter_curated_hits = filter_curated_hits; - let p_group_by = group_by; - let p_group_limit = group_limit; - let p_group_missing_values = group_missing_values; - let p_hidden_hits = hidden_hits; - let p_highlight_affix_num_tokens = highlight_affix_num_tokens; - let p_highlight_end_tag = highlight_end_tag; - let p_highlight_fields = highlight_fields; - let p_highlight_full_fields = highlight_full_fields; - let p_highlight_start_tag = highlight_start_tag; - let p_include_fields = include_fields; - let p_infix = infix; - let p_limit = limit; - let p_max_candidates = max_candidates; - let p_max_extra_prefix = max_extra_prefix; - 
let p_max_extra_suffix = max_extra_suffix; - let p_max_facet_values = max_facet_values; - let p_max_filter_by_candidates = max_filter_by_candidates; - let p_min_len_1typo = min_len_1typo; - let p_min_len_2typo = min_len_2typo; - let p_num_typos = num_typos; - let p_offset = offset; - let p_override_tags = override_tags; - let p_page = page; - let p_per_page = per_page; - let p_pinned_hits = pinned_hits; - let p_pre_segmented_query = pre_segmented_query; - let p_prefix = prefix; - let p_preset = preset; - let p_prioritize_exact_match = prioritize_exact_match; - let p_prioritize_num_matching_fields = prioritize_num_matching_fields; - let p_prioritize_token_position = prioritize_token_position; - let p_q = q; - let p_query_by = query_by; - let p_query_by_weights = query_by_weights; - let p_remote_embedding_num_tries = remote_embedding_num_tries; - let p_remote_embedding_timeout_ms = remote_embedding_timeout_ms; - let p_search_cutoff_ms = search_cutoff_ms; - let p_snippet_threshold = snippet_threshold; - let p_sort_by = sort_by; - let p_split_join_tokens = split_join_tokens; - let p_stopwords = stopwords; - let p_synonym_num_typos = synonym_num_typos; - let p_synonym_prefix = synonym_prefix; - let p_text_match_type = text_match_type; - let p_typo_tokens_threshold = typo_tokens_threshold; - let p_use_cache = use_cache; - let p_vector_query = vector_query; - let p_voice_query = voice_query; - let p_multi_search_searches_parameter = multi_search_searches_parameter; +pub async fn multi_search(configuration: &configuration::Configuration, params: MultiSearchParams) -> Result> { let uri_str = format!("{}/multi_search", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); - if let Some(ref param_value) = p_cache_ttl { - req_builder = req_builder.query(&[("cache_ttl", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_conversation { - req_builder = req_builder.query(&[("conversation", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_conversation_id { - req_builder = req_builder.query(&[("conversation_id", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_conversation_model_id { - req_builder = req_builder.query(&[("conversation_model_id", ¶m_value.to_string())]); + if let Some(ref param_value) = params.q { + req_builder = req_builder.query(&[("q", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_drop_tokens_mode { - req_builder = req_builder.query(&[("drop_tokens_mode", ¶m_value.to_string())]); + if let Some(ref param_value) = params.query_by { + req_builder = req_builder.query(&[("query_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_drop_tokens_threshold { - req_builder = req_builder.query(&[("drop_tokens_threshold", ¶m_value.to_string())]); + if let Some(ref param_value) = params.query_by_weights { + req_builder = req_builder.query(&[("query_by_weights", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_highlight_v1 { - req_builder = req_builder.query(&[("enable_highlight_v1", ¶m_value.to_string())]); + if let Some(ref param_value) = params.text_match_type { + req_builder = req_builder.query(&[("text_match_type", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_overrides { - req_builder = req_builder.query(&[("enable_overrides", ¶m_value.to_string())]); + if let Some(ref param_value) = params.prefix { + req_builder = req_builder.query(&[("prefix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_synonyms { - 
req_builder = req_builder.query(&[("enable_synonyms", ¶m_value.to_string())]); + if let Some(ref param_value) = params.infix { + req_builder = req_builder.query(&[("infix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_typos_for_alpha_numerical_tokens { - req_builder = req_builder.query(&[("enable_typos_for_alpha_numerical_tokens", ¶m_value.to_string())]); + if let Some(ref param_value) = params.max_extra_prefix { + req_builder = req_builder.query(&[("max_extra_prefix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_typos_for_numerical_tokens { - req_builder = req_builder.query(&[("enable_typos_for_numerical_tokens", ¶m_value.to_string())]); + if let Some(ref param_value) = params.max_extra_suffix { + req_builder = req_builder.query(&[("max_extra_suffix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_exclude_fields { - req_builder = req_builder.query(&[("exclude_fields", ¶m_value.to_string())]); + if let Some(ref param_value) = params.filter_by { + req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_exhaustive_search { - req_builder = req_builder.query(&[("exhaustive_search", ¶m_value.to_string())]); + if let Some(ref param_value) = params.sort_by { + req_builder = req_builder.query(&[("sort_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_facet_by { + if let Some(ref param_value) = params.facet_by { req_builder = req_builder.query(&[("facet_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_facet_query { + if let Some(ref param_value) = params.max_facet_values { + req_builder = req_builder.query(&[("max_facet_values", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.facet_query { req_builder = req_builder.query(&[("facet_query", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_facet_return_parent { - req_builder = req_builder.query(&[("facet_return_parent", ¶m_value.to_string())]); + if let Some(ref param_value) = params.num_typos { + req_builder = req_builder.query(&[("num_typos", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_facet_strategy { - req_builder = req_builder.query(&[("facet_strategy", ¶m_value.to_string())]); + if let Some(ref param_value) = params.page { + req_builder = req_builder.query(&[("page", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_filter_by { - req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); + if let Some(ref param_value) = params.per_page { + req_builder = req_builder.query(&[("per_page", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_filter_curated_hits { - req_builder = req_builder.query(&[("filter_curated_hits", ¶m_value.to_string())]); + if let Some(ref param_value) = params.limit { + req_builder = req_builder.query(&[("limit", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_group_by { + if let Some(ref param_value) = params.offset { + req_builder = req_builder.query(&[("offset", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.group_by { req_builder = req_builder.query(&[("group_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_group_limit { + if let Some(ref param_value) = params.group_limit { req_builder = req_builder.query(&[("group_limit", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_group_missing_values { + if let Some(ref param_value) = params.group_missing_values { req_builder = req_builder.query(&[("group_missing_values", 
¶m_value.to_string())]); } - if let Some(ref param_value) = p_hidden_hits { - req_builder = req_builder.query(&[("hidden_hits", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_highlight_affix_num_tokens { - req_builder = req_builder.query(&[("highlight_affix_num_tokens", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_highlight_end_tag { - req_builder = req_builder.query(&[("highlight_end_tag", ¶m_value.to_string())]); + if let Some(ref param_value) = params.include_fields { + req_builder = req_builder.query(&[("include_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_highlight_fields { - req_builder = req_builder.query(&[("highlight_fields", ¶m_value.to_string())]); + if let Some(ref param_value) = params.exclude_fields { + req_builder = req_builder.query(&[("exclude_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_highlight_full_fields { + if let Some(ref param_value) = params.highlight_full_fields { req_builder = req_builder.query(&[("highlight_full_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_highlight_start_tag { + if let Some(ref param_value) = params.highlight_affix_num_tokens { + req_builder = req_builder.query(&[("highlight_affix_num_tokens", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.highlight_start_tag { req_builder = req_builder.query(&[("highlight_start_tag", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_include_fields { - req_builder = req_builder.query(&[("include_fields", ¶m_value.to_string())]); + if let Some(ref param_value) = params.highlight_end_tag { + req_builder = req_builder.query(&[("highlight_end_tag", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_infix { - req_builder = req_builder.query(&[("infix", ¶m_value.to_string())]); + if let Some(ref param_value) = params.snippet_threshold { + req_builder = req_builder.query(&[("snippet_threshold", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_limit { - req_builder = req_builder.query(&[("limit", ¶m_value.to_string())]); + if let Some(ref param_value) = params.drop_tokens_threshold { + req_builder = req_builder.query(&[("drop_tokens_threshold", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_candidates { - req_builder = req_builder.query(&[("max_candidates", ¶m_value.to_string())]); + if let Some(ref param_value) = params.drop_tokens_mode { + req_builder = req_builder.query(&[("drop_tokens_mode", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_extra_prefix { - req_builder = req_builder.query(&[("max_extra_prefix", ¶m_value.to_string())]); + if let Some(ref param_value) = params.typo_tokens_threshold { + req_builder = req_builder.query(&[("typo_tokens_threshold", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_extra_suffix { - req_builder = req_builder.query(&[("max_extra_suffix", ¶m_value.to_string())]); + if let Some(ref param_value) = params.enable_typos_for_alpha_numerical_tokens { + req_builder = req_builder.query(&[("enable_typos_for_alpha_numerical_tokens", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_facet_values { - req_builder = req_builder.query(&[("max_facet_values", ¶m_value.to_string())]); + if let Some(ref param_value) = params.filter_curated_hits { + req_builder = req_builder.query(&[("filter_curated_hits", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_filter_by_candidates { - req_builder = req_builder.query(&[("max_filter_by_candidates", 
¶m_value.to_string())]); + if let Some(ref param_value) = params.enable_synonyms { + req_builder = req_builder.query(&[("enable_synonyms", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_min_len_1typo { - req_builder = req_builder.query(&[("min_len_1typo", ¶m_value.to_string())]); + if let Some(ref param_value) = params.synonym_prefix { + req_builder = req_builder.query(&[("synonym_prefix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_min_len_2typo { - req_builder = req_builder.query(&[("min_len_2typo", ¶m_value.to_string())]); + if let Some(ref param_value) = params.synonym_num_typos { + req_builder = req_builder.query(&[("synonym_num_typos", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_num_typos { - req_builder = req_builder.query(&[("num_typos", ¶m_value.to_string())]); + if let Some(ref param_value) = params.pinned_hits { + req_builder = req_builder.query(&[("pinned_hits", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_offset { - req_builder = req_builder.query(&[("offset", ¶m_value.to_string())]); + if let Some(ref param_value) = params.hidden_hits { + req_builder = req_builder.query(&[("hidden_hits", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_override_tags { + if let Some(ref param_value) = params.override_tags { req_builder = req_builder.query(&[("override_tags", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_page { - req_builder = req_builder.query(&[("page", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_per_page { - req_builder = req_builder.query(&[("per_page", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_pinned_hits { - req_builder = req_builder.query(&[("pinned_hits", ¶m_value.to_string())]); + if let Some(ref param_value) = params.highlight_fields { + req_builder = req_builder.query(&[("highlight_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_pre_segmented_query { + if let Some(ref param_value) = params.pre_segmented_query { req_builder = req_builder.query(&[("pre_segmented_query", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_prefix { - req_builder = req_builder.query(&[("prefix", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_preset { + if let Some(ref param_value) = params.preset { req_builder = req_builder.query(&[("preset", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_prioritize_exact_match { - req_builder = req_builder.query(&[("prioritize_exact_match", ¶m_value.to_string())]); + if let Some(ref param_value) = params.enable_overrides { + req_builder = req_builder.query(&[("enable_overrides", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_prioritize_num_matching_fields { - req_builder = req_builder.query(&[("prioritize_num_matching_fields", ¶m_value.to_string())]); + if let Some(ref param_value) = params.prioritize_exact_match { + req_builder = req_builder.query(&[("prioritize_exact_match", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_prioritize_token_position { + if let Some(ref param_value) = params.prioritize_token_position { req_builder = req_builder.query(&[("prioritize_token_position", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_q { - req_builder = req_builder.query(&[("q", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_query_by { - req_builder = req_builder.query(&[("query_by", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_query_by_weights { - req_builder = 
req_builder.query(&[("query_by_weights", ¶m_value.to_string())]); + if let Some(ref param_value) = params.prioritize_num_matching_fields { + req_builder = req_builder.query(&[("prioritize_num_matching_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_remote_embedding_num_tries { - req_builder = req_builder.query(&[("remote_embedding_num_tries", ¶m_value.to_string())]); + if let Some(ref param_value) = params.enable_typos_for_numerical_tokens { + req_builder = req_builder.query(&[("enable_typos_for_numerical_tokens", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_remote_embedding_timeout_ms { - req_builder = req_builder.query(&[("remote_embedding_timeout_ms", ¶m_value.to_string())]); + if let Some(ref param_value) = params.exhaustive_search { + req_builder = req_builder.query(&[("exhaustive_search", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_search_cutoff_ms { + if let Some(ref param_value) = params.search_cutoff_ms { req_builder = req_builder.query(&[("search_cutoff_ms", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_snippet_threshold { - req_builder = req_builder.query(&[("snippet_threshold", ¶m_value.to_string())]); + if let Some(ref param_value) = params.use_cache { + req_builder = req_builder.query(&[("use_cache", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_sort_by { - req_builder = req_builder.query(&[("sort_by", ¶m_value.to_string())]); + if let Some(ref param_value) = params.cache_ttl { + req_builder = req_builder.query(&[("cache_ttl", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_split_join_tokens { - req_builder = req_builder.query(&[("split_join_tokens", ¶m_value.to_string())]); + if let Some(ref param_value) = params.min_len_1typo { + req_builder = req_builder.query(&[("min_len_1typo", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_stopwords { - req_builder = req_builder.query(&[("stopwords", ¶m_value.to_string())]); + if let Some(ref param_value) = params.min_len_2typo { + req_builder = req_builder.query(&[("min_len_2typo", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_synonym_num_typos { - req_builder = req_builder.query(&[("synonym_num_typos", ¶m_value.to_string())]); + if let Some(ref param_value) = params.vector_query { + req_builder = req_builder.query(&[("vector_query", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_synonym_prefix { - req_builder = req_builder.query(&[("synonym_prefix", ¶m_value.to_string())]); + if let Some(ref param_value) = params.remote_embedding_timeout_ms { + req_builder = req_builder.query(&[("remote_embedding_timeout_ms", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_text_match_type { - req_builder = req_builder.query(&[("text_match_type", ¶m_value.to_string())]); + if let Some(ref param_value) = params.remote_embedding_num_tries { + req_builder = req_builder.query(&[("remote_embedding_num_tries", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_typo_tokens_threshold { - req_builder = req_builder.query(&[("typo_tokens_threshold", ¶m_value.to_string())]); + if let Some(ref param_value) = params.facet_strategy { + req_builder = req_builder.query(&[("facet_strategy", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_use_cache { - req_builder = req_builder.query(&[("use_cache", ¶m_value.to_string())]); + if let Some(ref param_value) = params.stopwords { + req_builder = req_builder.query(&[("stopwords", ¶m_value.to_string())]); } - if let Some(ref param_value) = 
p_vector_query { - req_builder = req_builder.query(&[("vector_query", ¶m_value.to_string())]); + if let Some(ref param_value) = params.facet_return_parent { + req_builder = req_builder.query(&[("facet_return_parent", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_voice_query { + if let Some(ref param_value) = params.voice_query { req_builder = req_builder.query(&[("voice_query", ¶m_value.to_string())]); } + if let Some(ref param_value) = params.conversation { + req_builder = req_builder.query(&[("conversation", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.conversation_model_id { + req_builder = req_builder.query(&[("conversation_model_id", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.conversation_id { + req_builder = req_builder.query(&[("conversation_id", ¶m_value.to_string())]); + } if let Some(ref user_agent) = configuration.user_agent { req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } @@ -887,7 +1038,7 @@ pub async fn multi_search(configuration: &configuration::Configuration, cache_tt }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_multi_search_searches_parameter); + req_builder = req_builder.json(¶ms.multi_search_searches_parameter); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -915,281 +1066,218 @@ pub async fn multi_search(configuration: &configuration::Configuration, cache_tt } /// Search for documents in a collection that match the search criteria. -pub async fn search_collection(configuration: &configuration::Configuration, collection_name: &str, cache_ttl: Option, conversation: Option, conversation_id: Option<&str>, conversation_model_id: Option<&str>, drop_tokens_mode: Option, drop_tokens_threshold: Option, enable_highlight_v1: Option, enable_overrides: Option, enable_synonyms: Option, enable_typos_for_alpha_numerical_tokens: Option, enable_typos_for_numerical_tokens: Option, exclude_fields: Option<&str>, exhaustive_search: Option, facet_by: Option<&str>, facet_query: Option<&str>, facet_return_parent: Option<&str>, facet_strategy: Option<&str>, filter_by: Option<&str>, filter_curated_hits: Option, group_by: Option<&str>, group_limit: Option, group_missing_values: Option, hidden_hits: Option<&str>, highlight_affix_num_tokens: Option, highlight_end_tag: Option<&str>, highlight_fields: Option<&str>, highlight_full_fields: Option<&str>, highlight_start_tag: Option<&str>, include_fields: Option<&str>, infix: Option<&str>, limit: Option, max_candidates: Option, max_extra_prefix: Option, max_extra_suffix: Option, max_facet_values: Option, max_filter_by_candidates: Option, min_len_1typo: Option, min_len_2typo: Option, num_typos: Option<&str>, offset: Option, override_tags: Option<&str>, page: Option, per_page: Option, pinned_hits: Option<&str>, pre_segmented_query: Option, prefix: Option<&str>, preset: Option<&str>, prioritize_exact_match: Option, prioritize_num_matching_fields: Option, prioritize_token_position: Option, q: Option<&str>, query_by: Option<&str>, query_by_weights: Option<&str>, remote_embedding_num_tries: Option, remote_embedding_timeout_ms: Option, search_cutoff_ms: Option, snippet_threshold: Option, sort_by: Option<&str>, split_join_tokens: Option<&str>, stopwords: Option<&str>, synonym_num_typos: Option, synonym_prefix: Option, text_match_type: Option<&str>, typo_tokens_threshold: Option, use_cache: Option, vector_query: Option<&str>, voice_query: Option<&str>) -> 
Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_cache_ttl = cache_ttl; - let p_conversation = conversation; - let p_conversation_id = conversation_id; - let p_conversation_model_id = conversation_model_id; - let p_drop_tokens_mode = drop_tokens_mode; - let p_drop_tokens_threshold = drop_tokens_threshold; - let p_enable_highlight_v1 = enable_highlight_v1; - let p_enable_overrides = enable_overrides; - let p_enable_synonyms = enable_synonyms; - let p_enable_typos_for_alpha_numerical_tokens = enable_typos_for_alpha_numerical_tokens; - let p_enable_typos_for_numerical_tokens = enable_typos_for_numerical_tokens; - let p_exclude_fields = exclude_fields; - let p_exhaustive_search = exhaustive_search; - let p_facet_by = facet_by; - let p_facet_query = facet_query; - let p_facet_return_parent = facet_return_parent; - let p_facet_strategy = facet_strategy; - let p_filter_by = filter_by; - let p_filter_curated_hits = filter_curated_hits; - let p_group_by = group_by; - let p_group_limit = group_limit; - let p_group_missing_values = group_missing_values; - let p_hidden_hits = hidden_hits; - let p_highlight_affix_num_tokens = highlight_affix_num_tokens; - let p_highlight_end_tag = highlight_end_tag; - let p_highlight_fields = highlight_fields; - let p_highlight_full_fields = highlight_full_fields; - let p_highlight_start_tag = highlight_start_tag; - let p_include_fields = include_fields; - let p_infix = infix; - let p_limit = limit; - let p_max_candidates = max_candidates; - let p_max_extra_prefix = max_extra_prefix; - let p_max_extra_suffix = max_extra_suffix; - let p_max_facet_values = max_facet_values; - let p_max_filter_by_candidates = max_filter_by_candidates; - let p_min_len_1typo = min_len_1typo; - let p_min_len_2typo = min_len_2typo; - let p_num_typos = num_typos; - let p_offset = offset; - let p_override_tags = override_tags; - let p_page = page; - let p_per_page = per_page; - let p_pinned_hits = pinned_hits; - let p_pre_segmented_query = pre_segmented_query; - let p_prefix = prefix; - let p_preset = preset; - let p_prioritize_exact_match = prioritize_exact_match; - let p_prioritize_num_matching_fields = prioritize_num_matching_fields; - let p_prioritize_token_position = prioritize_token_position; - let p_q = q; - let p_query_by = query_by; - let p_query_by_weights = query_by_weights; - let p_remote_embedding_num_tries = remote_embedding_num_tries; - let p_remote_embedding_timeout_ms = remote_embedding_timeout_ms; - let p_search_cutoff_ms = search_cutoff_ms; - let p_snippet_threshold = snippet_threshold; - let p_sort_by = sort_by; - let p_split_join_tokens = split_join_tokens; - let p_stopwords = stopwords; - let p_synonym_num_typos = synonym_num_typos; - let p_synonym_prefix = synonym_prefix; - let p_text_match_type = text_match_type; - let p_typo_tokens_threshold = typo_tokens_threshold; - let p_use_cache = use_cache; - let p_vector_query = vector_query; - let p_voice_query = voice_query; - - let uri_str = format!("{}/collections/{collectionName}/documents/search", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); +pub async fn search_collection(configuration: &configuration::Configuration, params: SearchCollectionParams) -> Result> { + + let uri_str = format!("{}/collections/{collectionName}/documents/search", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::GET, 
&uri_str); - if let Some(ref param_value) = p_cache_ttl { - req_builder = req_builder.query(&[("cache_ttl", ¶m_value.to_string())]); + if let Some(ref param_value) = params.q { + req_builder = req_builder.query(&[("q", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_conversation { - req_builder = req_builder.query(&[("conversation", ¶m_value.to_string())]); + if let Some(ref param_value) = params.query_by { + req_builder = req_builder.query(&[("query_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_conversation_id { - req_builder = req_builder.query(&[("conversation_id", ¶m_value.to_string())]); + if let Some(ref param_value) = params.nl_query { + req_builder = req_builder.query(&[("nl_query", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_conversation_model_id { - req_builder = req_builder.query(&[("conversation_model_id", ¶m_value.to_string())]); + if let Some(ref param_value) = params.nl_model_id { + req_builder = req_builder.query(&[("nl_model_id", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_drop_tokens_mode { - req_builder = req_builder.query(&[("drop_tokens_mode", ¶m_value.to_string())]); + if let Some(ref param_value) = params.query_by_weights { + req_builder = req_builder.query(&[("query_by_weights", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_drop_tokens_threshold { - req_builder = req_builder.query(&[("drop_tokens_threshold", ¶m_value.to_string())]); + if let Some(ref param_value) = params.text_match_type { + req_builder = req_builder.query(&[("text_match_type", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_highlight_v1 { - req_builder = req_builder.query(&[("enable_highlight_v1", ¶m_value.to_string())]); + if let Some(ref param_value) = params.prefix { + req_builder = req_builder.query(&[("prefix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_overrides { - req_builder = req_builder.query(&[("enable_overrides", ¶m_value.to_string())]); + if let Some(ref param_value) = params.infix { + req_builder = req_builder.query(&[("infix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_synonyms { - req_builder = req_builder.query(&[("enable_synonyms", ¶m_value.to_string())]); + if let Some(ref param_value) = params.max_extra_prefix { + req_builder = req_builder.query(&[("max_extra_prefix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_typos_for_alpha_numerical_tokens { - req_builder = req_builder.query(&[("enable_typos_for_alpha_numerical_tokens", ¶m_value.to_string())]); + if let Some(ref param_value) = params.max_extra_suffix { + req_builder = req_builder.query(&[("max_extra_suffix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_enable_typos_for_numerical_tokens { - req_builder = req_builder.query(&[("enable_typos_for_numerical_tokens", ¶m_value.to_string())]); + if let Some(ref param_value) = params.filter_by { + req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_exclude_fields { - req_builder = req_builder.query(&[("exclude_fields", ¶m_value.to_string())]); + if let Some(ref param_value) = params.max_filter_by_candidates { + req_builder = req_builder.query(&[("max_filter_by_candidates", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_exhaustive_search { - req_builder = req_builder.query(&[("exhaustive_search", ¶m_value.to_string())]); + if let Some(ref param_value) = params.sort_by { + req_builder = 
req_builder.query(&[("sort_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_facet_by { + if let Some(ref param_value) = params.facet_by { req_builder = req_builder.query(&[("facet_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_facet_query { + if let Some(ref param_value) = params.max_facet_values { + req_builder = req_builder.query(&[("max_facet_values", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.facet_query { req_builder = req_builder.query(&[("facet_query", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_facet_return_parent { - req_builder = req_builder.query(&[("facet_return_parent", ¶m_value.to_string())]); + if let Some(ref param_value) = params.num_typos { + req_builder = req_builder.query(&[("num_typos", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_facet_strategy { - req_builder = req_builder.query(&[("facet_strategy", ¶m_value.to_string())]); + if let Some(ref param_value) = params.page { + req_builder = req_builder.query(&[("page", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_filter_by { - req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); + if let Some(ref param_value) = params.per_page { + req_builder = req_builder.query(&[("per_page", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_filter_curated_hits { - req_builder = req_builder.query(&[("filter_curated_hits", ¶m_value.to_string())]); + if let Some(ref param_value) = params.limit { + req_builder = req_builder.query(&[("limit", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.offset { + req_builder = req_builder.query(&[("offset", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_group_by { + if let Some(ref param_value) = params.group_by { req_builder = req_builder.query(&[("group_by", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_group_limit { + if let Some(ref param_value) = params.group_limit { req_builder = req_builder.query(&[("group_limit", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_group_missing_values { + if let Some(ref param_value) = params.group_missing_values { req_builder = req_builder.query(&[("group_missing_values", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_hidden_hits { - req_builder = req_builder.query(&[("hidden_hits", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_highlight_affix_num_tokens { - req_builder = req_builder.query(&[("highlight_affix_num_tokens", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_highlight_end_tag { - req_builder = req_builder.query(&[("highlight_end_tag", ¶m_value.to_string())]); + if let Some(ref param_value) = params.include_fields { + req_builder = req_builder.query(&[("include_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_highlight_fields { - req_builder = req_builder.query(&[("highlight_fields", ¶m_value.to_string())]); + if let Some(ref param_value) = params.exclude_fields { + req_builder = req_builder.query(&[("exclude_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_highlight_full_fields { + if let Some(ref param_value) = params.highlight_full_fields { req_builder = req_builder.query(&[("highlight_full_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_highlight_start_tag { + if let Some(ref param_value) = params.highlight_affix_num_tokens { + req_builder = req_builder.query(&[("highlight_affix_num_tokens", ¶m_value.to_string())]); + } + if let 
Some(ref param_value) = params.highlight_start_tag { req_builder = req_builder.query(&[("highlight_start_tag", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_include_fields { - req_builder = req_builder.query(&[("include_fields", ¶m_value.to_string())]); + if let Some(ref param_value) = params.highlight_end_tag { + req_builder = req_builder.query(&[("highlight_end_tag", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_infix { - req_builder = req_builder.query(&[("infix", ¶m_value.to_string())]); + if let Some(ref param_value) = params.enable_highlight_v1 { + req_builder = req_builder.query(&[("enable_highlight_v1", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_limit { - req_builder = req_builder.query(&[("limit", ¶m_value.to_string())]); + if let Some(ref param_value) = params.snippet_threshold { + req_builder = req_builder.query(&[("snippet_threshold", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_candidates { - req_builder = req_builder.query(&[("max_candidates", ¶m_value.to_string())]); + if let Some(ref param_value) = params.drop_tokens_threshold { + req_builder = req_builder.query(&[("drop_tokens_threshold", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_extra_prefix { - req_builder = req_builder.query(&[("max_extra_prefix", ¶m_value.to_string())]); + if let Some(ref param_value) = params.drop_tokens_mode { + req_builder = req_builder.query(&[("drop_tokens_mode", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_extra_suffix { - req_builder = req_builder.query(&[("max_extra_suffix", ¶m_value.to_string())]); + if let Some(ref param_value) = params.typo_tokens_threshold { + req_builder = req_builder.query(&[("typo_tokens_threshold", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_facet_values { - req_builder = req_builder.query(&[("max_facet_values", ¶m_value.to_string())]); + if let Some(ref param_value) = params.enable_typos_for_alpha_numerical_tokens { + req_builder = req_builder.query(&[("enable_typos_for_alpha_numerical_tokens", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_max_filter_by_candidates { - req_builder = req_builder.query(&[("max_filter_by_candidates", ¶m_value.to_string())]); + if let Some(ref param_value) = params.filter_curated_hits { + req_builder = req_builder.query(&[("filter_curated_hits", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_min_len_1typo { - req_builder = req_builder.query(&[("min_len_1typo", ¶m_value.to_string())]); + if let Some(ref param_value) = params.enable_synonyms { + req_builder = req_builder.query(&[("enable_synonyms", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_min_len_2typo { - req_builder = req_builder.query(&[("min_len_2typo", ¶m_value.to_string())]); + if let Some(ref param_value) = params.synonym_prefix { + req_builder = req_builder.query(&[("synonym_prefix", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_num_typos { - req_builder = req_builder.query(&[("num_typos", ¶m_value.to_string())]); + if let Some(ref param_value) = params.synonym_num_typos { + req_builder = req_builder.query(&[("synonym_num_typos", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_offset { - req_builder = req_builder.query(&[("offset", ¶m_value.to_string())]); + if let Some(ref param_value) = params.pinned_hits { + req_builder = req_builder.query(&[("pinned_hits", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_override_tags { - req_builder = 
req_builder.query(&[("override_tags", ¶m_value.to_string())]); + if let Some(ref param_value) = params.hidden_hits { + req_builder = req_builder.query(&[("hidden_hits", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_page { - req_builder = req_builder.query(&[("page", ¶m_value.to_string())]); + if let Some(ref param_value) = params.override_tags { + req_builder = req_builder.query(&[("override_tags", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_per_page { - req_builder = req_builder.query(&[("per_page", ¶m_value.to_string())]); + if let Some(ref param_value) = params.highlight_fields { + req_builder = req_builder.query(&[("highlight_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_pinned_hits { - req_builder = req_builder.query(&[("pinned_hits", ¶m_value.to_string())]); + if let Some(ref param_value) = params.split_join_tokens { + req_builder = req_builder.query(&[("split_join_tokens", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_pre_segmented_query { + if let Some(ref param_value) = params.pre_segmented_query { req_builder = req_builder.query(&[("pre_segmented_query", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_prefix { - req_builder = req_builder.query(&[("prefix", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_preset { + if let Some(ref param_value) = params.preset { req_builder = req_builder.query(&[("preset", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_prioritize_exact_match { + if let Some(ref param_value) = params.enable_overrides { + req_builder = req_builder.query(&[("enable_overrides", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.prioritize_exact_match { req_builder = req_builder.query(&[("prioritize_exact_match", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_prioritize_num_matching_fields { - req_builder = req_builder.query(&[("prioritize_num_matching_fields", ¶m_value.to_string())]); + if let Some(ref param_value) = params.max_candidates { + req_builder = req_builder.query(&[("max_candidates", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_prioritize_token_position { + if let Some(ref param_value) = params.prioritize_token_position { req_builder = req_builder.query(&[("prioritize_token_position", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_q { - req_builder = req_builder.query(&[("q", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_query_by { - req_builder = req_builder.query(&[("query_by", ¶m_value.to_string())]); - } - if let Some(ref param_value) = p_query_by_weights { - req_builder = req_builder.query(&[("query_by_weights", ¶m_value.to_string())]); + if let Some(ref param_value) = params.prioritize_num_matching_fields { + req_builder = req_builder.query(&[("prioritize_num_matching_fields", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_remote_embedding_num_tries { - req_builder = req_builder.query(&[("remote_embedding_num_tries", ¶m_value.to_string())]); + if let Some(ref param_value) = params.enable_typos_for_numerical_tokens { + req_builder = req_builder.query(&[("enable_typos_for_numerical_tokens", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_remote_embedding_timeout_ms { - req_builder = req_builder.query(&[("remote_embedding_timeout_ms", ¶m_value.to_string())]); + if let Some(ref param_value) = params.exhaustive_search { + req_builder = req_builder.query(&[("exhaustive_search", ¶m_value.to_string())]); } - if let Some(ref 
param_value) = p_search_cutoff_ms { + if let Some(ref param_value) = params.search_cutoff_ms { req_builder = req_builder.query(&[("search_cutoff_ms", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_snippet_threshold { - req_builder = req_builder.query(&[("snippet_threshold", ¶m_value.to_string())]); + if let Some(ref param_value) = params.use_cache { + req_builder = req_builder.query(&[("use_cache", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_sort_by { - req_builder = req_builder.query(&[("sort_by", ¶m_value.to_string())]); + if let Some(ref param_value) = params.cache_ttl { + req_builder = req_builder.query(&[("cache_ttl", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_split_join_tokens { - req_builder = req_builder.query(&[("split_join_tokens", ¶m_value.to_string())]); + if let Some(ref param_value) = params.min_len_1typo { + req_builder = req_builder.query(&[("min_len_1typo", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_stopwords { - req_builder = req_builder.query(&[("stopwords", ¶m_value.to_string())]); + if let Some(ref param_value) = params.min_len_2typo { + req_builder = req_builder.query(&[("min_len_2typo", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_synonym_num_typos { - req_builder = req_builder.query(&[("synonym_num_typos", ¶m_value.to_string())]); + if let Some(ref param_value) = params.vector_query { + req_builder = req_builder.query(&[("vector_query", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_synonym_prefix { - req_builder = req_builder.query(&[("synonym_prefix", ¶m_value.to_string())]); + if let Some(ref param_value) = params.remote_embedding_timeout_ms { + req_builder = req_builder.query(&[("remote_embedding_timeout_ms", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_text_match_type { - req_builder = req_builder.query(&[("text_match_type", ¶m_value.to_string())]); + if let Some(ref param_value) = params.remote_embedding_num_tries { + req_builder = req_builder.query(&[("remote_embedding_num_tries", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_typo_tokens_threshold { - req_builder = req_builder.query(&[("typo_tokens_threshold", ¶m_value.to_string())]); + if let Some(ref param_value) = params.facet_strategy { + req_builder = req_builder.query(&[("facet_strategy", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_use_cache { - req_builder = req_builder.query(&[("use_cache", ¶m_value.to_string())]); + if let Some(ref param_value) = params.stopwords { + req_builder = req_builder.query(&[("stopwords", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_vector_query { - req_builder = req_builder.query(&[("vector_query", ¶m_value.to_string())]); + if let Some(ref param_value) = params.facet_return_parent { + req_builder = req_builder.query(&[("facet_return_parent", ¶m_value.to_string())]); } - if let Some(ref param_value) = p_voice_query { + if let Some(ref param_value) = params.voice_query { req_builder = req_builder.query(&[("voice_query", ¶m_value.to_string())]); } + if let Some(ref param_value) = params.conversation { + req_builder = req_builder.query(&[("conversation", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.conversation_model_id { + req_builder = req_builder.query(&[("conversation_model_id", ¶m_value.to_string())]); + } + if let Some(ref param_value) = params.conversation_id { + req_builder = req_builder.query(&[("conversation_id", ¶m_value.to_string())]); + } if let Some(ref user_agent) = 
configuration.user_agent { req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } @@ -1228,17 +1316,12 @@ pub async fn search_collection(configuration: &configuration::Configuration, col } /// Update an individual document from a collection by using its ID. The update can be partial. -pub async fn update_document(configuration: &configuration::Configuration, collection_name: &str, document_id: &str, body: serde_json::Value, dirty_values: Option) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_document_id = document_id; - let p_body = body; - let p_dirty_values = dirty_values; - - let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), documentId=crate::apis::urlencode(p_document_id)); +pub async fn update_document(configuration: &configuration::Configuration, params: UpdateDocumentParams) -> Result> { + + let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), documentId=crate::apis::urlencode(params.document_id)); let mut req_builder = configuration.client.request(reqwest::Method::PATCH, &uri_str); - if let Some(ref param_value) = p_dirty_values { + if let Some(ref param_value) = params.dirty_values { req_builder = req_builder.query(&[("dirty_values", &param_value.to_string())]); } if let Some(ref user_agent) = configuration.user_agent { @@ -1252,7 +1335,7 @@ pub async fn update_document(configuration: &configuration::Configuration, colle }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_body); + req_builder = req_builder.json(&params.body); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -1280,16 +1363,12 @@ pub async fn update_document(configuration: &configuration::Configuration, colle } /// The filter_by query parameter is used to filter to specify a condition against which the documents are matched. The request body contains the fields that should be updated for any documents that match the filter condition. This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later.
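For the filter-based bulk update described above, `UpdateDocumentsParams` holds exactly the three fields defined earlier in this patch (`collection_name`, `body`, `filter_by`); the single-document `update_document` just shown follows the same pattern with `UpdateDocumentParams`. A minimal sketch, with the filter and patch body invented for illustration:

```rust
use serde_json::json;
use typesense_codegen::apis::{configuration, documents_api};

// Hypothetical example: mark every product of one brand as discontinued.
async fn discontinue_brand(
    config: &configuration::Configuration,
) -> Result<(), Box<dyn std::error::Error>> {
    let params = documents_api::UpdateDocumentsParams {
        collection_name: "products".to_owned(),
        body: json!({"discontinued": true}),       // fields patched into matching docs
        filter_by: Some("brand:=Acme".to_owned()), // assumed Option<String>
    };
    let summary = documents_api::update_documents(config, params).await?;
    println!("update summary: {:?}", summary);
    Ok(())
}
```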
-pub async fn update_documents(configuration: &configuration::Configuration, collection_name: &str, body: serde_json::Value, filter_by: Option<&str>) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_body = body; - let p_filter_by = filter_by; +pub async fn update_documents(configuration: &configuration::Configuration, params: UpdateDocumentsParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::PATCH, &uri_str); - if let Some(ref param_value) = p_filter_by { + if let Some(ref param_value) = params.filter_by { req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); } if let Some(ref user_agent) = configuration.user_agent { @@ -1303,7 +1382,7 @@ pub async fn update_documents(configuration: &configuration::Configuration, coll }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_body); + req_builder = req_builder.json(¶ms.body); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -1331,13 +1410,9 @@ pub async fn update_documents(configuration: &configuration::Configuration, coll } /// Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. -pub async fn upsert_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str, search_override_schema: models::SearchOverrideSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_override_id = override_id; - let p_search_override_schema = search_override_schema; +pub async fn upsert_search_override(configuration: &configuration::Configuration, params: UpsertSearchOverrideParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id)); + let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), overrideId=crate::apis::urlencode(params.override_id)); let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -1351,7 +1426,7 @@ pub async fn upsert_search_override(configuration: &configuration::Configuration }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_search_override_schema); + req_builder = req_builder.json(¶ms.search_override_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; diff --git a/typesense_codegen/src/apis/health_api.rs b/typesense_codegen/src/apis/health_api.rs index 18fe2a5..9ce4c07 100644 --- a/typesense_codegen/src/apis/health_api.rs +++ b/typesense_codegen/src/apis/health_api.rs @@ -24,7 +24,7 @@ pub enum HealthError { /// Checks if Typesense server is ready to accept requests. 
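A minimal caller-side sketch of the params-struct style introduced above for the documents API. It is not taken from the generated code; the field names mirror the `params.*` accesses in the hunks above, the values are illustrative, and it is assumed to run inside an async fn that returns a compatible Result:

// Hypothetical migration example.
// Old style: update_document(&configuration, "products", "42", body, None).await?;
// New style, assuming UpdateDocumentParams carries the fields used above:
let params = documents_api::UpdateDocumentParams {
    collection_name: "products".to_string(),
    document_id: "42".to_string(),
    body: serde_json::json!({ "price": 19.99 }),
    dirty_values: None,
};
let _updated = documents_api::update_document(&configuration, params).await?;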
-pub async fn health(configuration: &configuration::Configuration, ) -> Result> {
+pub async fn health(configuration: &configuration::Configuration) -> Result> {
     let uri_str = format!("{}/health", configuration.base_path);
     let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str);
diff --git a/typesense_codegen/src/apis/keys_api.rs b/typesense_codegen/src/apis/keys_api.rs
index fbc0f24..5490f79 100644
--- a/typesense_codegen/src/apis/keys_api.rs
+++ b/typesense_codegen/src/apis/keys_api.rs
@@ -14,6 +14,27 @@ use serde::{Deserialize, Serialize, de::Error as _};
 use crate::{apis::ResponseContent, models};
 use super::{Error, configuration, ContentType};
+/// struct for passing parameters to the method [`create_key`]
+#[derive(Clone, Debug)]
+pub struct CreateKeyParams {
+    /// The object that describes API key scope
+    pub api_key_schema: Option<models::ApiKeySchema>
+}
+
+/// struct for passing parameters to the method [`delete_key`]
+#[derive(Clone, Debug)]
+pub struct DeleteKeyParams {
+    /// The ID of the key to delete
+    pub key_id: i64
+}
+
+/// struct for passing parameters to the method [`get_key`]
+#[derive(Clone, Debug)]
+pub struct GetKeyParams {
+    /// The ID of the key to retrieve
+    pub key_id: i64
+}
+
 /// struct for typed errors of method [`create_key`]
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -50,9 +71,7 @@ pub enum GetKeysError {
 /// Create an API Key with fine-grain access control. You can restrict access on both a per-collection and per-action level. The generated key is returned only during creation, so be sure to store it in a secure place.
-pub async fn create_key(configuration: &configuration::Configuration, api_key_schema: Option<models::ApiKeySchema>) -> Result> {
-    // add a prefix to parameters to efficiently prevent name collisions
-    let p_api_key_schema = api_key_schema;
+pub async fn create_key(configuration: &configuration::Configuration, params: CreateKeyParams) -> Result> {
     let uri_str = format!("{}/keys", configuration.base_path);
     let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str);
@@ -68,7 +87,7 @@ pub async fn create_key(configuration: &configuration::Configuration, api_key_sc
        };
        req_builder = req_builder.header("X-TYPESENSE-API-KEY", value);
    };
-    req_builder = req_builder.json(&p_api_key_schema);
+    req_builder = req_builder.json(&params.api_key_schema);
     let req = req_builder.build()?;
     let resp = configuration.client.execute(req).await?;
@@ -95,11 +114,9 @@ pub async fn create_key(configuration: &configuration::Configuration, api_key_sc
     }
 }
-pub async fn delete_key(configuration: &configuration::Configuration, key_id: i64) -> Result> {
-    // add a prefix to parameters to efficiently prevent name collisions
-    let p_key_id = key_id;
+pub async fn delete_key(configuration: &configuration::Configuration, params: DeleteKeyParams) -> Result> {
-    let uri_str = format!("{}/keys/{keyId}", configuration.base_path, keyId=p_key_id);
+    let uri_str = format!("{}/keys/{keyId}", configuration.base_path, keyId=params.key_id);
     let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str);
     if let Some(ref user_agent) = configuration.user_agent {
@@ -140,11 +157,9 @@ pub async fn delete_key(configuration: &configuration::Configuration, key_id: i6
 }
 /// Retrieve (metadata about) a key. Only the key prefix is returned when you retrieve a key. Due to security reasons, only the create endpoint returns the full API key.
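A caller-side sketch of the new keys surface, using the `CreateKeyParams` and `DeleteKeyParams` structs above and the `ApiKeySchema::new(description, actions, collections)` argument order shown later in this patch. The key values and the literal ID are illustrative, and the snippet assumes an async fn returning a compatible Result:

// Hypothetical usage example.
let schema = models::ApiKeySchema::new(
    "Search-only key".to_string(),
    vec!["documents:search".to_string()],
    vec!["products".to_string()],
);
let _created = keys_api::create_key(
    &configuration,
    keys_api::CreateKeyParams { api_key_schema: Some(schema) },
).await?;
// Later, delete a key by its numeric ID.
keys_api::delete_key(&configuration, keys_api::DeleteKeyParams { key_id: 123 }).await?;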
-pub async fn get_key(configuration: &configuration::Configuration, key_id: i64) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_key_id = key_id; +pub async fn get_key(configuration: &configuration::Configuration, params: GetKeyParams) -> Result> { - let uri_str = format!("{}/keys/{keyId}", configuration.base_path, keyId=p_key_id); + let uri_str = format!("{}/keys/{keyId}", configuration.base_path, keyId=params.key_id); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -184,7 +199,7 @@ pub async fn get_key(configuration: &configuration::Configuration, key_id: i64) } } -pub async fn get_keys(configuration: &configuration::Configuration, ) -> Result> { +pub async fn get_keys(configuration: &configuration::Configuration) -> Result> { let uri_str = format!("{}/keys", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); diff --git a/typesense_codegen/src/apis/mod.rs b/typesense_codegen/src/apis/mod.rs index e9eb817..070f27a 100644 --- a/typesense_codegen/src/apis/mod.rs +++ b/typesense_codegen/src/apis/mod.rs @@ -11,6 +11,7 @@ pub struct ResponseContent { #[derive(Debug)] pub enum Error { Reqwest(reqwest::Error), + ReqwestMiddleware(reqwest_middleware::Error), Serde(serde_json::Error), Io(std::io::Error), ResponseError(ResponseContent), @@ -20,6 +21,7 @@ impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (module, e) = match self { Error::Reqwest(e) => ("reqwest", e.to_string()), + Error::ReqwestMiddleware(e) => ("reqwest-middleware", e.to_string()), Error::Serde(e) => ("serde", e.to_string()), Error::Io(e) => ("IO", e.to_string()), Error::ResponseError(e) => ("response", format!("status code {}", e.status)), @@ -32,6 +34,7 @@ impl error::Error for Error { fn source(&self) -> Option<&(dyn error::Error + 'static)> { Some(match self { Error::Reqwest(e) => e, + Error::ReqwestMiddleware(e) => e, Error::Serde(e) => e, Error::Io(e) => e, Error::ResponseError(_) => return None, @@ -45,6 +48,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: reqwest_middleware::Error) -> Self { + Error::ReqwestMiddleware(e) + } +} + impl From for Error { fn from(e: serde_json::Error) -> Self { Error::Serde(e) @@ -119,6 +128,7 @@ pub mod debug_api; pub mod documents_api; pub mod health_api; pub mod keys_api; +pub mod nl_search_models_api; pub mod operations_api; pub mod override_api; pub mod presets_api; diff --git a/typesense_codegen/src/apis/nl_search_models_api.rs b/typesense_codegen/src/apis/nl_search_models_api.rs new file mode 100644 index 0000000..0bf0ee8 --- /dev/null +++ b/typesense_codegen/src/apis/nl_search_models_api.rs @@ -0,0 +1,305 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; +use serde::{Deserialize, Serialize, de::Error as _}; +use crate::{apis::ResponseContent, models}; +use super::{Error, configuration, ContentType}; + +/// struct for passing parameters to the method [`create_nl_search_model`] +#[derive(Clone, Debug)] +pub struct CreateNlSearchModelParams { + /// The NL search model to be created + pub nl_search_model_create_schema: models::NlSearchModelCreateSchema +} + +/// struct for passing parameters to the method [`delete_nl_search_model`] +#[derive(Clone, Debug)] +pub struct DeleteNlSearchModelParams { + /// The ID of the NL search model to delete + pub model_id: String +} + +/// struct for passing parameters to the method [`retrieve_nl_search_model`] +#[derive(Clone, Debug)] +pub struct RetrieveNlSearchModelParams { + /// The ID of the NL search model to retrieve + pub model_id: String +} + +/// struct for passing parameters to the method [`update_nl_search_model`] +#[derive(Clone, Debug)] +pub struct UpdateNlSearchModelParams { + /// The ID of the NL search model to update + pub model_id: String, + /// The NL search model fields to update + pub body: models::NlSearchModelCreateSchema +} + + +/// struct for typed errors of method [`create_nl_search_model`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum CreateNlSearchModelError { + Status400(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`delete_nl_search_model`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum DeleteNlSearchModelError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_all_nl_search_models`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrieveAllNlSearchModelsError { + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`retrieve_nl_search_model`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RetrieveNlSearchModelError { + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`update_nl_search_model`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum UpdateNlSearchModelError { + Status400(models::ApiResponse), + Status404(models::ApiResponse), + UnknownValue(serde_json::Value), +} + + +/// Create a new NL search model. 
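A minimal caller-side sketch for the ID-based NL search model params defined above. The create schema's fields are not shown in this hunk, so only retrieval and deletion are illustrated; the model ID is a placeholder and the snippet assumes an async fn returning a compatible Result:

// Hypothetical usage example.
let _model = nl_search_models_api::retrieve_nl_search_model(
    &configuration,
    nl_search_models_api::RetrieveNlSearchModelParams { model_id: "gpt-model-1".to_string() },
).await?;
nl_search_models_api::delete_nl_search_model(
    &configuration,
    nl_search_models_api::DeleteNlSearchModelParams { model_id: "gpt-model-1".to_string() },
).await?;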
+pub async fn create_nl_search_model(configuration: &configuration::Configuration, params: CreateNlSearchModelParams) -> Result<models::NlSearchModelSchema, Error<CreateNlSearchModelError>> {
+
+    let uri_str = format!("{}/nl_search_models", configuration.base_path);
+    let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str);
+
+    if let Some(ref user_agent) = configuration.user_agent {
+        req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone());
+    }
+    if let Some(ref apikey) = configuration.api_key {
+        let key = apikey.key.clone();
+        let value = match apikey.prefix {
+            Some(ref prefix) => format!("{} {}", prefix, key),
+            None => key,
+        };
+        req_builder = req_builder.header("X-TYPESENSE-API-KEY", value);
+    };
+    req_builder = req_builder.json(&params.nl_search_model_create_schema);
+
+    let req = req_builder.build()?;
+    let resp = configuration.client.execute(req).await?;
+
+    let status = resp.status();
+    let content_type = resp
+        .headers()
+        .get("content-type")
+        .and_then(|v| v.to_str().ok())
+        .unwrap_or("application/octet-stream");
+    let content_type = super::ContentType::from(content_type);
+
+    if !status.is_client_error() && !status.is_server_error() {
+        let content = resp.text().await?;
+        match content_type {
+            ContentType::Json => serde_json::from_str(&content).map_err(Error::from),
+            ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::NlSearchModelSchema`"))),
+            ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::NlSearchModelSchema`")))),
+        }
+    } else {
+        let content = resp.text().await?;
+        let entity: Option<CreateNlSearchModelError> = serde_json::from_str(&content).ok();
+        Err(Error::ResponseError(ResponseContent { status, content, entity }))
+    }
+}
+
+/// Delete a specific NL search model by its ID.
+pub async fn delete_nl_search_model(configuration: &configuration::Configuration, params: DeleteNlSearchModelParams) -> Result> { + + let uri_str = format!("{}/nl_search_models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(params.model_id)); + let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::NlSearchModelDeleteSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::NlSearchModelDeleteSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve all NL search models. 
+pub async fn retrieve_all_nl_search_models(configuration: &configuration::Configuration) -> Result, Error> { + + let uri_str = format!("{}/nl_search_models", configuration.base_path); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `Vec<models::NlSearchModelSchema>`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `Vec<models::NlSearchModelSchema>`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Retrieve a specific NL search model by its ID. 
+pub async fn retrieve_nl_search_model(configuration: &configuration::Configuration, params: RetrieveNlSearchModelParams) -> Result> { + + let uri_str = format!("{}/nl_search_models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(params.model_id)); + let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::NlSearchModelSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::NlSearchModelSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + +/// Update an existing NL search model. 
+pub async fn update_nl_search_model(configuration: &configuration::Configuration, params: UpdateNlSearchModelParams) -> Result> { + + let uri_str = format!("{}/nl_search_models/{modelId}", configuration.base_path, modelId=crate::apis::urlencode(params.model_id)); + let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); + + if let Some(ref user_agent) = configuration.user_agent { + req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); + } + if let Some(ref apikey) = configuration.api_key { + let key = apikey.key.clone(); + let value = match apikey.prefix { + Some(ref prefix) => format!("{} {}", prefix, key), + None => key, + }; + req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); + }; + req_builder = req_builder.json(¶ms.body); + + let req = req_builder.build()?; + let resp = configuration.client.execute(req).await?; + + let status = resp.status(); + let content_type = resp + .headers() + .get("content-type") + .and_then(|v| v.to_str().ok()) + .unwrap_or("application/octet-stream"); + let content_type = super::ContentType::from(content_type); + + if !status.is_client_error() && !status.is_server_error() { + let content = resp.text().await?; + match content_type { + ContentType::Json => serde_json::from_str(&content).map_err(Error::from), + ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `models::NlSearchModelSchema`"))), + ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `models::NlSearchModelSchema`")))), + } + } else { + let content = resp.text().await?; + let entity: Option = serde_json::from_str(&content).ok(); + Err(Error::ResponseError(ResponseContent { status, content, entity })) + } +} + diff --git a/typesense_codegen/src/apis/operations_api.rs b/typesense_codegen/src/apis/operations_api.rs index 3c838b3..229c8cd 100644 --- a/typesense_codegen/src/apis/operations_api.rs +++ b/typesense_codegen/src/apis/operations_api.rs @@ -14,6 +14,13 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`take_snapshot`] +#[derive(Clone, Debug)] +pub struct TakeSnapshotParams { + /// The directory on the server where the snapshot should be saved. + pub snapshot_path: String +} + /// struct for typed errors of method [`get_schema_changes`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -52,7 +59,7 @@ pub enum VoteError { /// Returns the status of any ongoing schema change operations. If no schema changes are in progress, returns an empty response. -pub async fn get_schema_changes(configuration: &configuration::Configuration, ) -> Result, Error> { +pub async fn get_schema_changes(configuration: &configuration::Configuration) -> Result, Error> { let uri_str = format!("{}/operations/schema_changes", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -95,7 +102,7 @@ pub async fn get_schema_changes(configuration: &configuration::Configuration, ) } /// Retrieve the stats about API endpoints. 
-pub async fn retrieve_api_stats(configuration: &configuration::Configuration, ) -> Result> { +pub async fn retrieve_api_stats(configuration: &configuration::Configuration) -> Result> { let uri_str = format!("{}/stats.json", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -138,7 +145,7 @@ pub async fn retrieve_api_stats(configuration: &configuration::Configuration, ) } /// Retrieve the metrics. -pub async fn retrieve_metrics(configuration: &configuration::Configuration, ) -> Result> { +pub async fn retrieve_metrics(configuration: &configuration::Configuration) -> Result> { let uri_str = format!("{}/metrics.json", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -181,14 +188,12 @@ pub async fn retrieve_metrics(configuration: &configuration::Configuration, ) -> } /// Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. You can then backup the snapshot directory that gets created and later restore it as a data directory, as needed. -pub async fn take_snapshot(configuration: &configuration::Configuration, snapshot_path: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_snapshot_path = snapshot_path; +pub async fn take_snapshot(configuration: &configuration::Configuration, params: TakeSnapshotParams) -> Result> { let uri_str = format!("{}/operations/snapshot", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); - req_builder = req_builder.query(&[("snapshot_path", &p_snapshot_path.to_string())]); + req_builder = req_builder.query(&[("snapshot_path", ¶ms.snapshot_path.to_string())]); if let Some(ref user_agent) = configuration.user_agent { req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } @@ -227,7 +232,7 @@ pub async fn take_snapshot(configuration: &configuration::Configuration, snapsho } /// Triggers a follower node to initiate the raft voting process, which triggers leader re-election. The follower node that you run this operation against will become the new leader, once this command succeeds. -pub async fn vote(configuration: &configuration::Configuration, ) -> Result> { +pub async fn vote(configuration: &configuration::Configuration) -> Result> { let uri_str = format!("{}/operations/vote", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); diff --git a/typesense_codegen/src/apis/override_api.rs b/typesense_codegen/src/apis/override_api.rs index fca6378..be1b76f 100644 --- a/typesense_codegen/src/apis/override_api.rs +++ b/typesense_codegen/src/apis/override_api.rs @@ -14,6 +14,15 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`get_search_override`] +#[derive(Clone, Debug)] +pub struct GetSearchOverrideParams { + /// The name of the collection + pub collection_name: String, + /// The id of the search override + pub override_id: String +} + /// struct for typed errors of method [`get_search_override`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -24,12 +33,9 @@ pub enum GetSearchOverrideError { /// Retrieve the details of a search override, given its id. 
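A caller-side sketch of the snapshot operation with the new `TakeSnapshotParams` struct defined above. The directory path is illustrative, and the snippet assumes an async fn returning a compatible Result:

// Hypothetical usage example.
let _result = operations_api::take_snapshot(
    &configuration,
    operations_api::TakeSnapshotParams { snapshot_path: "/tmp/typesense-data-snapshot".to_string() },
).await?;
// The returned value is the server's acknowledgement payload for the snapshot request.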
-pub async fn get_search_override(configuration: &configuration::Configuration, collection_name: &str, override_id: &str) -> Result> {
-    // add a prefix to parameters to efficiently prevent name collisions
-    let p_collection_name = collection_name;
-    let p_override_id = override_id;
+pub async fn get_search_override(configuration: &configuration::Configuration, params: GetSearchOverrideParams) -> Result> {
-    let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), overrideId=crate::apis::urlencode(p_override_id));
+    let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), overrideId=crate::apis::urlencode(params.override_id));
     let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str);
     if let Some(ref user_agent) = configuration.user_agent {
diff --git a/typesense_codegen/src/apis/presets_api.rs b/typesense_codegen/src/apis/presets_api.rs
index f2f1eb4..f90dd81 100644
--- a/typesense_codegen/src/apis/presets_api.rs
+++ b/typesense_codegen/src/apis/presets_api.rs
@@ -14,6 +14,29 @@ use serde::{Deserialize, Serialize, de::Error as _};
 use crate::{apis::ResponseContent, models};
 use super::{Error, configuration, ContentType};
+/// struct for passing parameters to the method [`delete_preset`]
+#[derive(Clone, Debug)]
+pub struct DeletePresetParams {
+    /// The ID of the preset to delete.
+    pub preset_id: String
+}
+
+/// struct for passing parameters to the method [`retrieve_preset`]
+#[derive(Clone, Debug)]
+pub struct RetrievePresetParams {
+    /// The ID of the preset to retrieve.
+    pub preset_id: String
+}
+
+/// struct for passing parameters to the method [`upsert_preset`]
+#[derive(Clone, Debug)]
+pub struct UpsertPresetParams {
+    /// The name of the preset to upsert.
+    pub preset_id: String,
+    /// The preset to upsert.
+    pub preset_upsert_schema: models::PresetUpsertSchema
+}
+
 /// struct for typed errors of method [`delete_preset`]
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -48,11 +71,9 @@ pub enum UpsertPresetError {
 /// Permanently deletes a preset, given its name.
-pub async fn delete_preset(configuration: &configuration::Configuration, preset_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_preset_id = preset_id; +pub async fn delete_preset(configuration: &configuration::Configuration, params: DeletePresetParams) -> Result> { - let uri_str = format!("{}/presets/{presetId}", configuration.base_path, presetId=crate::apis::urlencode(p_preset_id)); + let uri_str = format!("{}/presets/{presetId}", configuration.base_path, presetId=crate::apis::urlencode(params.preset_id)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -93,7 +114,7 @@ pub async fn delete_preset(configuration: &configuration::Configuration, preset_ } /// Retrieve the details of all presets -pub async fn retrieve_all_presets(configuration: &configuration::Configuration, ) -> Result> { +pub async fn retrieve_all_presets(configuration: &configuration::Configuration) -> Result> { let uri_str = format!("{}/presets", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -136,11 +157,9 @@ pub async fn retrieve_all_presets(configuration: &configuration::Configuration, } /// Retrieve the details of a preset, given it's name. -pub async fn retrieve_preset(configuration: &configuration::Configuration, preset_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_preset_id = preset_id; +pub async fn retrieve_preset(configuration: &configuration::Configuration, params: RetrievePresetParams) -> Result> { - let uri_str = format!("{}/presets/{presetId}", configuration.base_path, presetId=crate::apis::urlencode(p_preset_id)); + let uri_str = format!("{}/presets/{presetId}", configuration.base_path, presetId=crate::apis::urlencode(params.preset_id)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -181,12 +200,9 @@ pub async fn retrieve_preset(configuration: &configuration::Configuration, prese } /// Create or update an existing preset. 
-pub async fn upsert_preset(configuration: &configuration::Configuration, preset_id: &str, preset_upsert_schema: models::PresetUpsertSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_preset_id = preset_id; - let p_preset_upsert_schema = preset_upsert_schema; +pub async fn upsert_preset(configuration: &configuration::Configuration, params: UpsertPresetParams) -> Result> { - let uri_str = format!("{}/presets/{presetId}", configuration.base_path, presetId=crate::apis::urlencode(p_preset_id)); + let uri_str = format!("{}/presets/{presetId}", configuration.base_path, presetId=crate::apis::urlencode(params.preset_id)); let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -200,7 +216,7 @@ pub async fn upsert_preset(configuration: &configuration::Configuration, preset_ }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_preset_upsert_schema); + req_builder = req_builder.json(¶ms.preset_upsert_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; diff --git a/typesense_codegen/src/apis/stemming_api.rs b/typesense_codegen/src/apis/stemming_api.rs index e3b59ae..2432031 100644 --- a/typesense_codegen/src/apis/stemming_api.rs +++ b/typesense_codegen/src/apis/stemming_api.rs @@ -14,6 +14,22 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`get_stemming_dictionary`] +#[derive(Clone, Debug)] +pub struct GetStemmingDictionaryParams { + /// The ID of the dictionary to retrieve + pub dictionary_id: String +} + +/// struct for passing parameters to the method [`import_stemming_dictionary`] +#[derive(Clone, Debug)] +pub struct ImportStemmingDictionaryParams { + /// The ID to assign to the dictionary + pub id: String, + /// The JSONL file containing word mappings + pub body: String +} + /// struct for typed errors of method [`get_stemming_dictionary`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -40,11 +56,9 @@ pub enum ListStemmingDictionariesError { /// Fetch details of a specific stemming dictionary. -pub async fn get_stemming_dictionary(configuration: &configuration::Configuration, dictionary_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_dictionary_id = dictionary_id; +pub async fn get_stemming_dictionary(configuration: &configuration::Configuration, params: GetStemmingDictionaryParams) -> Result> { - let uri_str = format!("{}/stemming/dictionaries/{dictionaryId}", configuration.base_path, dictionaryId=crate::apis::urlencode(p_dictionary_id)); + let uri_str = format!("{}/stemming/dictionaries/{dictionaryId}", configuration.base_path, dictionaryId=crate::apis::urlencode(params.dictionary_id)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -85,15 +99,12 @@ pub async fn get_stemming_dictionary(configuration: &configuration::Configuratio } /// Upload a JSONL file containing word mappings to create or update a stemming dictionary. 
-pub async fn import_stemming_dictionary(configuration: &configuration::Configuration, id: &str, body: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_id = id; - let p_body = body; +pub async fn import_stemming_dictionary(configuration: &configuration::Configuration, params: ImportStemmingDictionaryParams) -> Result> { let uri_str = format!("{}/stemming/dictionaries/import", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); - req_builder = req_builder.query(&[("id", &p_id.to_string())]); + req_builder = req_builder.query(&[("id", ¶ms.id.to_string())]); if let Some(ref user_agent) = configuration.user_agent { req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); } @@ -105,7 +116,7 @@ pub async fn import_stemming_dictionary(configuration: &configuration::Configura }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_body); + req_builder = req_builder.json(¶ms.body); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -133,7 +144,7 @@ pub async fn import_stemming_dictionary(configuration: &configuration::Configura } /// Retrieve a list of all available stemming dictionaries. -pub async fn list_stemming_dictionaries(configuration: &configuration::Configuration, ) -> Result> { +pub async fn list_stemming_dictionaries(configuration: &configuration::Configuration) -> Result> { let uri_str = format!("{}/stemming/dictionaries", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); diff --git a/typesense_codegen/src/apis/stopwords_api.rs b/typesense_codegen/src/apis/stopwords_api.rs index 8889f3f..e89df6e 100644 --- a/typesense_codegen/src/apis/stopwords_api.rs +++ b/typesense_codegen/src/apis/stopwords_api.rs @@ -14,6 +14,29 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`delete_stopwords_set`] +#[derive(Clone, Debug)] +pub struct DeleteStopwordsSetParams { + /// The ID of the stopwords set to delete. + pub set_id: String +} + +/// struct for passing parameters to the method [`retrieve_stopwords_set`] +#[derive(Clone, Debug)] +pub struct RetrieveStopwordsSetParams { + /// The ID of the stopwords set to retrieve. + pub set_id: String +} + +/// struct for passing parameters to the method [`upsert_stopwords_set`] +#[derive(Clone, Debug)] +pub struct UpsertStopwordsSetParams { + /// The ID of the stopwords set to upsert. + pub set_id: String, + /// The stopwords set to upsert. + pub stopwords_set_upsert_schema: models::StopwordsSetUpsertSchema +} + /// struct for typed errors of method [`delete_stopwords_set`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -48,11 +71,9 @@ pub enum UpsertStopwordsSetError { /// Permanently deletes a stopwords set, given it's name. 
-pub async fn delete_stopwords_set(configuration: &configuration::Configuration, set_id: &str) -> Result> {
-    // add a prefix to parameters to efficiently prevent name collisions
-    let p_set_id = set_id;
+pub async fn delete_stopwords_set(configuration: &configuration::Configuration, params: DeleteStopwordsSetParams) -> Result> {
-    let uri_str = format!("{}/stopwords/{setId}", configuration.base_path, setId=crate::apis::urlencode(p_set_id));
+    let uri_str = format!("{}/stopwords/{setId}", configuration.base_path, setId=crate::apis::urlencode(params.set_id));
     let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str);
     if let Some(ref user_agent) = configuration.user_agent {
@@ -93,11 +114,9 @@ pub async fn delete_stopwords_set(configuration: &configuration::Configuration,
 }
 /// Retrieve the details of a stopwords set, given its name.
-pub async fn retrieve_stopwords_set(configuration: &configuration::Configuration, set_id: &str) -> Result> {
-    // add a prefix to parameters to efficiently prevent name collisions
-    let p_set_id = set_id;
+pub async fn retrieve_stopwords_set(configuration: &configuration::Configuration, params: RetrieveStopwordsSetParams) -> Result> {
-    let uri_str = format!("{}/stopwords/{setId}", configuration.base_path, setId=crate::apis::urlencode(p_set_id));
+    let uri_str = format!("{}/stopwords/{setId}", configuration.base_path, setId=crate::apis::urlencode(params.set_id));
     let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str);
     if let Some(ref user_agent) = configuration.user_agent {
@@ -138,7 +157,7 @@ pub async fn retrieve_stopwords_set(configuration: &configuration::Configuration
 }
 /// Retrieve the details of all stopwords sets
-pub async fn retrieve_stopwords_sets(configuration: &configuration::Configuration, ) -> Result> {
+pub async fn retrieve_stopwords_sets(configuration: &configuration::Configuration) -> Result> {
     let uri_str = format!("{}/stopwords", configuration.base_path);
     let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str);
@@ -181,12 +200,9 @@ pub async fn retrieve_stopwords_sets(configuration: &configuration::Configuratio
 }
 /// Create or update a stopwords set, given its name.
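A caller-side sketch for the stopwords params structs above. The upsert schema's fields are defined elsewhere in the spec, so only the ID-based calls are shown; the set name is illustrative and the snippet assumes an async fn returning a compatible Result:

// Hypothetical usage example.
let _set = stopwords_api::retrieve_stopwords_set(
    &configuration,
    stopwords_api::RetrieveStopwordsSetParams { set_id: "common-english".to_string() },
).await?;
stopwords_api::delete_stopwords_set(
    &configuration,
    stopwords_api::DeleteStopwordsSetParams { set_id: "common-english".to_string() },
).await?;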
-pub async fn upsert_stopwords_set(configuration: &configuration::Configuration, set_id: &str, stopwords_set_upsert_schema: models::StopwordsSetUpsertSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_set_id = set_id; - let p_stopwords_set_upsert_schema = stopwords_set_upsert_schema; +pub async fn upsert_stopwords_set(configuration: &configuration::Configuration, params: UpsertStopwordsSetParams) -> Result> { - let uri_str = format!("{}/stopwords/{setId}", configuration.base_path, setId=crate::apis::urlencode(p_set_id)); + let uri_str = format!("{}/stopwords/{setId}", configuration.base_path, setId=crate::apis::urlencode(params.set_id)); let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -200,7 +216,7 @@ pub async fn upsert_stopwords_set(configuration: &configuration::Configuration, }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_stopwords_set_upsert_schema); + req_builder = req_builder.json(¶ms.stopwords_set_upsert_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; diff --git a/typesense_codegen/src/apis/synonyms_api.rs b/typesense_codegen/src/apis/synonyms_api.rs index 39b3130..12429a3 100644 --- a/typesense_codegen/src/apis/synonyms_api.rs +++ b/typesense_codegen/src/apis/synonyms_api.rs @@ -14,6 +14,42 @@ use serde::{Deserialize, Serialize, de::Error as _}; use crate::{apis::ResponseContent, models}; use super::{Error, configuration, ContentType}; +/// struct for passing parameters to the method [`delete_search_synonym`] +#[derive(Clone, Debug)] +pub struct DeleteSearchSynonymParams { + /// The name of the collection + pub collection_name: String, + /// The ID of the search synonym to delete + pub synonym_id: String +} + +/// struct for passing parameters to the method [`get_search_synonym`] +#[derive(Clone, Debug)] +pub struct GetSearchSynonymParams { + /// The name of the collection + pub collection_name: String, + /// The id of the search synonym + pub synonym_id: String +} + +/// struct for passing parameters to the method [`get_search_synonyms`] +#[derive(Clone, Debug)] +pub struct GetSearchSynonymsParams { + /// The name of the collection + pub collection_name: String +} + +/// struct for passing parameters to the method [`upsert_search_synonym`] +#[derive(Clone, Debug)] +pub struct UpsertSearchSynonymParams { + /// The name of the collection + pub collection_name: String, + /// The ID of the search synonym to create/update + pub synonym_id: String, + /// The search synonym object to be created/updated + pub search_synonym_schema: models::SearchSynonymSchema +} + /// struct for typed errors of method [`delete_search_synonym`] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -48,12 +84,9 @@ pub enum UpsertSearchSynonymError { } -pub async fn delete_search_synonym(configuration: &configuration::Configuration, collection_name: &str, synonym_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_synonym_id = synonym_id; +pub async fn delete_search_synonym(configuration: &configuration::Configuration, params: DeleteSearchSynonymParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/synonyms/{synonymId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), synonymId=crate::apis::urlencode(p_synonym_id)); 
+ let uri_str = format!("{}/collections/{collectionName}/synonyms/{synonymId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), synonymId=crate::apis::urlencode(params.synonym_id)); let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -94,12 +127,9 @@ pub async fn delete_search_synonym(configuration: &configuration::Configuration, } /// Retrieve the details of a search synonym, given its id. -pub async fn get_search_synonym(configuration: &configuration::Configuration, collection_name: &str, synonym_id: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_synonym_id = synonym_id; +pub async fn get_search_synonym(configuration: &configuration::Configuration, params: GetSearchSynonymParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/synonyms/{synonymId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), synonymId=crate::apis::urlencode(p_synonym_id)); + let uri_str = format!("{}/collections/{collectionName}/synonyms/{synonymId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), synonymId=crate::apis::urlencode(params.synonym_id)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -139,11 +169,9 @@ pub async fn get_search_synonym(configuration: &configuration::Configuration, co } } -pub async fn get_search_synonyms(configuration: &configuration::Configuration, collection_name: &str) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; +pub async fn get_search_synonyms(configuration: &configuration::Configuration, params: GetSearchSynonymsParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/synonyms", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name)); + let uri_str = format!("{}/collections/{collectionName}/synonyms", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -184,13 +212,9 @@ pub async fn get_search_synonyms(configuration: &configuration::Configuration, c } /// Create or update a synonym to define search terms that should be considered equivalent. 
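A caller-side sketch for the synonyms params structs above. The construction of the synonym schema is an assumption: it presumes `SearchSynonymSchema::new` takes the list of equivalent terms as its required field, which is not shown in this hunk; all values are illustrative and the snippet assumes an async fn returning a compatible Result:

// Hypothetical usage example; the SearchSynonymSchema constructor is assumed.
let schema = models::SearchSynonymSchema::new(vec![
    "sneakers".to_string(),
    "trainers".to_string(),
    "running shoes".to_string(),
]);
synonyms_api::upsert_search_synonym(
    &configuration,
    synonyms_api::UpsertSearchSynonymParams {
        collection_name: "products".to_string(),
        synonym_id: "shoe-synonyms".to_string(),
        search_synonym_schema: schema,
    },
).await?;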
-pub async fn upsert_search_synonym(configuration: &configuration::Configuration, collection_name: &str, synonym_id: &str, search_synonym_schema: models::SearchSynonymSchema) -> Result> { - // add a prefix to parameters to efficiently prevent name collisions - let p_collection_name = collection_name; - let p_synonym_id = synonym_id; - let p_search_synonym_schema = search_synonym_schema; +pub async fn upsert_search_synonym(configuration: &configuration::Configuration, params: UpsertSearchSynonymParams) -> Result> { - let uri_str = format!("{}/collections/{collectionName}/synonyms/{synonymId}", configuration.base_path, collectionName=crate::apis::urlencode(p_collection_name), synonymId=crate::apis::urlencode(p_synonym_id)); + let uri_str = format!("{}/collections/{collectionName}/synonyms/{synonymId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), synonymId=crate::apis::urlencode(params.synonym_id)); let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -204,7 +228,7 @@ pub async fn upsert_search_synonym(configuration: &configuration::Configuration, }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(&p_search_synonym_schema); + req_builder = req_builder.json(¶ms.search_synonym_schema); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; diff --git a/typesense_codegen/src/models/analytics_event_create_schema.rs b/typesense_codegen/src/models/analytics_event_create_schema.rs index 335ee31..ff08423 100644 --- a/typesense_codegen/src/models/analytics_event_create_schema.rs +++ b/typesense_codegen/src/models/analytics_event_create_schema.rs @@ -13,20 +13,20 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsEventCreateSchema { - #[serde(rename = "data")] - pub data: serde_json::Value, - #[serde(rename = "name")] - pub name: String, #[serde(rename = "type")] pub r#type: String, + #[serde(rename = "name")] + pub name: String, + #[serde(rename = "data")] + pub data: serde_json::Value, } impl AnalyticsEventCreateSchema { - pub fn new(data: serde_json::Value, name: String, r#type: String) -> AnalyticsEventCreateSchema { + pub fn new(r#type: String, name: String, data: serde_json::Value) -> AnalyticsEventCreateSchema { AnalyticsEventCreateSchema { - data, - name, r#type, + name, + data, } } } diff --git a/typesense_codegen/src/models/analytics_rule_parameters.rs b/typesense_codegen/src/models/analytics_rule_parameters.rs index c46ff8b..8973e98 100644 --- a/typesense_codegen/src/models/analytics_rule_parameters.rs +++ b/typesense_codegen/src/models/analytics_rule_parameters.rs @@ -13,23 +13,23 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsRuleParameters { + #[serde(rename = "source")] + pub source: Box, #[serde(rename = "destination")] pub destination: Box, - #[serde(rename = "expand_query", skip_serializing_if = "Option::is_none")] - pub expand_query: Option, #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] pub limit: Option, - #[serde(rename = "source")] - pub source: Box, + #[serde(rename = "expand_query", skip_serializing_if = "Option::is_none")] + pub expand_query: Option, } impl AnalyticsRuleParameters { - pub fn new(destination: models::AnalyticsRuleParametersDestination, source: 
models::AnalyticsRuleParametersSource) -> AnalyticsRuleParameters { + pub fn new(source: models::AnalyticsRuleParametersSource, destination: models::AnalyticsRuleParametersDestination) -> AnalyticsRuleParameters { AnalyticsRuleParameters { + source: Box::new(source), destination: Box::new(destination), - expand_query: None, limit: None, - source: Box::new(source), + expand_query: None, } } } diff --git a/typesense_codegen/src/models/analytics_rule_parameters_source_events_inner.rs b/typesense_codegen/src/models/analytics_rule_parameters_source_events_inner.rs index da0a8ae..083e6d2 100644 --- a/typesense_codegen/src/models/analytics_rule_parameters_source_events_inner.rs +++ b/typesense_codegen/src/models/analytics_rule_parameters_source_events_inner.rs @@ -13,20 +13,20 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsRuleParametersSourceEventsInner { - #[serde(rename = "name")] - pub name: String, #[serde(rename = "type")] pub r#type: String, #[serde(rename = "weight")] pub weight: f32, + #[serde(rename = "name")] + pub name: String, } impl AnalyticsRuleParametersSourceEventsInner { - pub fn new(name: String, r#type: String, weight: f32) -> AnalyticsRuleParametersSourceEventsInner { + pub fn new(r#type: String, weight: f32, name: String) -> AnalyticsRuleParametersSourceEventsInner { AnalyticsRuleParametersSourceEventsInner { - name, r#type, weight, + name, } } } diff --git a/typesense_codegen/src/models/analytics_rule_schema.rs b/typesense_codegen/src/models/analytics_rule_schema.rs index e1e1cd2..6e5d2e2 100644 --- a/typesense_codegen/src/models/analytics_rule_schema.rs +++ b/typesense_codegen/src/models/analytics_rule_schema.rs @@ -13,19 +13,19 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsRuleSchema { - #[serde(rename = "params")] - pub params: Box, #[serde(rename = "type")] pub r#type: Type, + #[serde(rename = "params")] + pub params: Box, #[serde(rename = "name")] pub name: String, } impl AnalyticsRuleSchema { - pub fn new(params: models::AnalyticsRuleParameters, r#type: Type, name: String) -> AnalyticsRuleSchema { + pub fn new(r#type: Type, params: models::AnalyticsRuleParameters, name: String) -> AnalyticsRuleSchema { AnalyticsRuleSchema { - params: Box::new(params), r#type, + params: Box::new(params), name, } } diff --git a/typesense_codegen/src/models/analytics_rule_upsert_schema.rs b/typesense_codegen/src/models/analytics_rule_upsert_schema.rs index ecd1f80..f4a31d7 100644 --- a/typesense_codegen/src/models/analytics_rule_upsert_schema.rs +++ b/typesense_codegen/src/models/analytics_rule_upsert_schema.rs @@ -13,17 +13,17 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct AnalyticsRuleUpsertSchema { - #[serde(rename = "params")] - pub params: Box, #[serde(rename = "type")] pub r#type: Type, + #[serde(rename = "params")] + pub params: Box, } impl AnalyticsRuleUpsertSchema { - pub fn new(params: models::AnalyticsRuleParameters, r#type: Type) -> AnalyticsRuleUpsertSchema { + pub fn new(r#type: Type, params: models::AnalyticsRuleParameters) -> AnalyticsRuleUpsertSchema { AnalyticsRuleUpsertSchema { - params: Box::new(params), r#type, + params: Box::new(params), } } } diff --git a/typesense_codegen/src/models/api_key.rs b/typesense_codegen/src/models/api_key.rs index a4a771d..687614c 100644 --- a/typesense_codegen/src/models/api_key.rs +++ 
b/typesense_codegen/src/models/api_key.rs @@ -13,16 +13,16 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ApiKey { + #[serde(rename = "value", skip_serializing_if = "Option::is_none")] + pub value: Option, + #[serde(rename = "description")] + pub description: String, #[serde(rename = "actions")] pub actions: Vec, #[serde(rename = "collections")] pub collections: Vec, - #[serde(rename = "description")] - pub description: String, #[serde(rename = "expires_at", skip_serializing_if = "Option::is_none")] pub expires_at: Option, - #[serde(rename = "value", skip_serializing_if = "Option::is_none")] - pub value: Option, #[serde(rename = "id", skip_serializing_if = "Option::is_none")] pub id: Option, #[serde(rename = "value_prefix", skip_serializing_if = "Option::is_none")] @@ -30,13 +30,13 @@ pub struct ApiKey { } impl ApiKey { - pub fn new(actions: Vec, collections: Vec, description: String) -> ApiKey { + pub fn new(description: String, actions: Vec, collections: Vec) -> ApiKey { ApiKey { + value: None, + description, actions, collections, - description, expires_at: None, - value: None, id: None, value_prefix: None, } diff --git a/typesense_codegen/src/models/api_key_schema.rs b/typesense_codegen/src/models/api_key_schema.rs index fcddd8b..5d2922a 100644 --- a/typesense_codegen/src/models/api_key_schema.rs +++ b/typesense_codegen/src/models/api_key_schema.rs @@ -13,26 +13,26 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ApiKeySchema { + #[serde(rename = "value", skip_serializing_if = "Option::is_none")] + pub value: Option, + #[serde(rename = "description")] + pub description: String, #[serde(rename = "actions")] pub actions: Vec, #[serde(rename = "collections")] pub collections: Vec, - #[serde(rename = "description")] - pub description: String, #[serde(rename = "expires_at", skip_serializing_if = "Option::is_none")] pub expires_at: Option, - #[serde(rename = "value", skip_serializing_if = "Option::is_none")] - pub value: Option, } impl ApiKeySchema { - pub fn new(actions: Vec, collections: Vec, description: String) -> ApiKeySchema { + pub fn new(description: String, actions: Vec, collections: Vec) -> ApiKeySchema { ApiKeySchema { + value: None, + description, actions, collections, - description, expires_at: None, - value: None, } } } diff --git a/typesense_codegen/src/models/collection_alias.rs b/typesense_codegen/src/models/collection_alias.rs index 2c92dfa..d25ff0c 100644 --- a/typesense_codegen/src/models/collection_alias.rs +++ b/typesense_codegen/src/models/collection_alias.rs @@ -13,19 +13,19 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CollectionAlias { - /// Name of the collection the alias mapped to - #[serde(rename = "collection_name")] - pub collection_name: String, /// Name of the collection alias #[serde(rename = "name")] pub name: String, + /// Name of the collection the alias mapped to + #[serde(rename = "collection_name")] + pub collection_name: String, } impl CollectionAlias { - pub fn new(collection_name: String, name: String) -> CollectionAlias { + pub fn new(name: String, collection_name: String) -> CollectionAlias { CollectionAlias { - collection_name, name, + collection_name, } } } diff --git a/typesense_codegen/src/models/collection_response.rs b/typesense_codegen/src/models/collection_response.rs index fcd3cb1..4c56f37 100644 --- 
a/typesense_codegen/src/models/collection_response.rs +++ b/typesense_codegen/src/models/collection_response.rs @@ -13,46 +13,46 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CollectionResponse { + /// Name of the collection + #[serde(rename = "name")] + pub name: String, + /// A list of fields for querying, filtering and faceting + #[serde(rename = "fields")] + pub fields: Vec, /// The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. #[serde(rename = "default_sorting_field", skip_serializing_if = "Option::is_none")] pub default_sorting_field: Option, + /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. + #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] + pub token_separators: Option>, /// Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. #[serde(rename = "enable_nested_fields", skip_serializing_if = "Option::is_none")] pub enable_nested_fields: Option, - /// A list of fields for querying, filtering and faceting - #[serde(rename = "fields")] - pub fields: Vec, - /// Name of the collection - #[serde(rename = "name")] - pub name: String, /// List of symbols or special characters to be indexed. #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] pub symbols_to_index: Option>, - /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. 
- #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] - pub token_separators: Option>, #[serde(rename = "voice_query_model", skip_serializing_if = "Option::is_none")] pub voice_query_model: Option>, - /// Timestamp of when the collection was created (Unix epoch in seconds) - #[serde(rename = "created_at")] - pub created_at: i64, /// Number of documents in the collection #[serde(rename = "num_documents")] pub num_documents: i64, + /// Timestamp of when the collection was created (Unix epoch in seconds) + #[serde(rename = "created_at")] + pub created_at: i64, } impl CollectionResponse { - pub fn new(fields: Vec, name: String, created_at: i64, num_documents: i64) -> CollectionResponse { + pub fn new(name: String, fields: Vec, num_documents: i64, created_at: i64) -> CollectionResponse { CollectionResponse { + name, + fields, default_sorting_field: None, + token_separators: None, enable_nested_fields: None, - fields, - name, symbols_to_index: None, - token_separators: None, voice_query_model: None, - created_at, num_documents, + created_at, } } } diff --git a/typesense_codegen/src/models/collection_schema.rs b/typesense_codegen/src/models/collection_schema.rs index 544aff9..c3375ef 100644 --- a/typesense_codegen/src/models/collection_schema.rs +++ b/typesense_codegen/src/models/collection_schema.rs @@ -13,37 +13,37 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CollectionSchema { + /// Name of the collection + #[serde(rename = "name")] + pub name: String, + /// A list of fields for querying, filtering and faceting + #[serde(rename = "fields")] + pub fields: Vec, /// The name of an int32 / float field that determines the order in which the search results are ranked when a sort_by clause is not provided during searching. This field must indicate some kind of popularity. #[serde(rename = "default_sorting_field", skip_serializing_if = "Option::is_none")] pub default_sorting_field: Option, + /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. + #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] + pub token_separators: Option>, /// Enables experimental support at a collection level for nested object or object array fields. This field is only available if the Typesense server is version `0.24.0.rcn34` or later. #[serde(rename = "enable_nested_fields", skip_serializing_if = "Option::is_none")] pub enable_nested_fields: Option, - /// A list of fields for querying, filtering and faceting - #[serde(rename = "fields")] - pub fields: Vec, - /// Name of the collection - #[serde(rename = "name")] - pub name: String, /// List of symbols or special characters to be indexed. #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] pub symbols_to_index: Option>, - /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. 
- #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] - pub token_separators: Option>, #[serde(rename = "voice_query_model", skip_serializing_if = "Option::is_none")] pub voice_query_model: Option>, } impl CollectionSchema { - pub fn new(fields: Vec, name: String) -> CollectionSchema { + pub fn new(name: String, fields: Vec) -> CollectionSchema { CollectionSchema { + name, + fields, default_sorting_field: None, + token_separators: None, enable_nested_fields: None, - fields, - name, symbols_to_index: None, - token_separators: None, voice_query_model: None, } } diff --git a/typesense_codegen/src/models/conversation_model_create_schema.rs b/typesense_codegen/src/models/conversation_model_create_schema.rs index 2442897..3ae1f37 100644 --- a/typesense_codegen/src/models/conversation_model_create_schema.rs +++ b/typesense_codegen/src/models/conversation_model_create_schema.rs @@ -13,46 +13,46 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ConversationModelCreateSchema { - /// LLM service's account ID (only applicable for Cloudflare) - #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] - pub account_id: Option, - /// The LLM service's API Key - #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] - pub api_key: Option, - /// Typesense collection that stores the historical conversations - #[serde(rename = "history_collection")] - pub history_collection: String, /// An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. #[serde(rename = "id", skip_serializing_if = "Option::is_none")] pub id: Option, - /// The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. - #[serde(rename = "max_bytes")] - pub max_bytes: i32, /// Name of the LLM model offered by OpenAI, Cloudflare or vLLM #[serde(rename = "model_name")] pub model_name: String, + /// The LLM service's API Key + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, + /// Typesense collection that stores the historical conversations + #[serde(rename = "history_collection")] + pub history_collection: String, + /// LLM service's account ID (only applicable for Cloudflare) + #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] + pub account_id: Option, /// The system prompt that contains special instructions to the LLM #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] pub system_prompt: Option, /// Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] pub ttl: Option, + /// The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. 
+ #[serde(rename = "max_bytes")] + pub max_bytes: i32, /// URL of vLLM service #[serde(rename = "vllm_url", skip_serializing_if = "Option::is_none")] pub vllm_url: Option, } impl ConversationModelCreateSchema { - pub fn new(history_collection: String, max_bytes: i32, model_name: String) -> ConversationModelCreateSchema { + pub fn new(model_name: String, history_collection: String, max_bytes: i32) -> ConversationModelCreateSchema { ConversationModelCreateSchema { - account_id: None, - api_key: None, - history_collection, id: None, - max_bytes, model_name, + api_key: None, + history_collection, + account_id: None, system_prompt: None, ttl: None, + max_bytes, vllm_url: None, } } diff --git a/typesense_codegen/src/models/conversation_model_schema.rs b/typesense_codegen/src/models/conversation_model_schema.rs index 5d004f2..271c58d 100644 --- a/typesense_codegen/src/models/conversation_model_schema.rs +++ b/typesense_codegen/src/models/conversation_model_schema.rs @@ -13,46 +13,46 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ConversationModelSchema { - /// LLM service's account ID (only applicable for Cloudflare) - #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] - pub account_id: Option, - /// The LLM service's API Key - #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] - pub api_key: Option, - /// Typesense collection that stores the historical conversations - #[serde(rename = "history_collection")] - pub history_collection: String, /// An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. #[serde(rename = "id")] pub id: String, - /// The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. - #[serde(rename = "max_bytes")] - pub max_bytes: i32, /// Name of the LLM model offered by OpenAI, Cloudflare or vLLM #[serde(rename = "model_name")] pub model_name: String, + /// The LLM service's API Key + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, + /// Typesense collection that stores the historical conversations + #[serde(rename = "history_collection")] + pub history_collection: String, + /// LLM service's account ID (only applicable for Cloudflare) + #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] + pub account_id: Option, /// The system prompt that contains special instructions to the LLM #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] pub system_prompt: Option, /// Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] pub ttl: Option, + /// The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. 
+ #[serde(rename = "max_bytes")] + pub max_bytes: i32, /// URL of vLLM service #[serde(rename = "vllm_url", skip_serializing_if = "Option::is_none")] pub vllm_url: Option, } impl ConversationModelSchema { - pub fn new(history_collection: String, id: String, max_bytes: i32, model_name: String) -> ConversationModelSchema { + pub fn new(id: String, model_name: String, history_collection: String, max_bytes: i32) -> ConversationModelSchema { ConversationModelSchema { - account_id: None, - api_key: None, - history_collection, id, - max_bytes, model_name, + api_key: None, + history_collection, + account_id: None, system_prompt: None, ttl: None, + max_bytes, vllm_url: None, } } diff --git a/typesense_codegen/src/models/conversation_model_update_schema.rs b/typesense_codegen/src/models/conversation_model_update_schema.rs index e02b0ba..1a05887 100644 --- a/typesense_codegen/src/models/conversation_model_update_schema.rs +++ b/typesense_codegen/src/models/conversation_model_update_schema.rs @@ -13,30 +13,30 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ConversationModelUpdateSchema { - /// LLM service's account ID (only applicable for Cloudflare) - #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] - pub account_id: Option, - /// The LLM service's API Key - #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] - pub api_key: Option, - /// Typesense collection that stores the historical conversations - #[serde(rename = "history_collection", skip_serializing_if = "Option::is_none")] - pub history_collection: Option, /// An explicit id for the model, otherwise the API will return a response with an auto-generated conversation model id. #[serde(rename = "id", skip_serializing_if = "Option::is_none")] pub id: Option, - /// The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. - #[serde(rename = "max_bytes", skip_serializing_if = "Option::is_none")] - pub max_bytes: Option, /// Name of the LLM model offered by OpenAI, Cloudflare or vLLM #[serde(rename = "model_name", skip_serializing_if = "Option::is_none")] pub model_name: Option, + /// The LLM service's API Key + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, + /// Typesense collection that stores the historical conversations + #[serde(rename = "history_collection", skip_serializing_if = "Option::is_none")] + pub history_collection: Option, + /// LLM service's account ID (only applicable for Cloudflare) + #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] + pub account_id: Option, /// The system prompt that contains special instructions to the LLM #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] pub system_prompt: Option, /// Time interval in seconds after which the messages would be deleted. Default: 86400 (24 hours) #[serde(rename = "ttl", skip_serializing_if = "Option::is_none")] pub ttl: Option, + /// The maximum number of bytes to send to the LLM in every API call. Consult the LLM's documentation on the number of bytes supported in the context window. 
+ #[serde(rename = "max_bytes", skip_serializing_if = "Option::is_none")] + pub max_bytes: Option, /// URL of vLLM service #[serde(rename = "vllm_url", skip_serializing_if = "Option::is_none")] pub vllm_url: Option, @@ -45,14 +45,14 @@ pub struct ConversationModelUpdateSchema { impl ConversationModelUpdateSchema { pub fn new() -> ConversationModelUpdateSchema { ConversationModelUpdateSchema { - account_id: None, - api_key: None, - history_collection: None, id: None, - max_bytes: None, model_name: None, + api_key: None, + history_collection: None, + account_id: None, system_prompt: None, ttl: None, + max_bytes: None, vllm_url: None, } } diff --git a/typesense_codegen/src/models/delete_documents_delete_documents_parameters_parameter.rs b/typesense_codegen/src/models/delete_documents_delete_documents_parameters_parameter.rs new file mode 100644 index 0000000..71531e2 --- /dev/null +++ b/typesense_codegen/src/models/delete_documents_delete_documents_parameters_parameter.rs @@ -0,0 +1,38 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct DeleteDocumentsDeleteDocumentsParametersParameter { + #[serde(rename = "filter_by")] + pub filter_by: String, + /// Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. + #[serde(rename = "batch_size", skip_serializing_if = "Option::is_none")] + pub batch_size: Option, + #[serde(rename = "ignore_not_found", skip_serializing_if = "Option::is_none")] + pub ignore_not_found: Option, + /// When true, removes all documents from the collection while preserving the collection and its schema. + #[serde(rename = "truncate", skip_serializing_if = "Option::is_none")] + pub truncate: Option, +} + +impl DeleteDocumentsDeleteDocumentsParametersParameter { + pub fn new(filter_by: String) -> DeleteDocumentsDeleteDocumentsParametersParameter { + DeleteDocumentsDeleteDocumentsParametersParameter { + filter_by, + batch_size: None, + ignore_not_found: None, + truncate: None, + } + } +} + diff --git a/typesense_codegen/src/models/delete_documents_parameters.rs b/typesense_codegen/src/models/delete_documents_parameters.rs new file mode 100644 index 0000000..1833431 --- /dev/null +++ b/typesense_codegen/src/models/delete_documents_parameters.rs @@ -0,0 +1,38 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct DeleteDocumentsParameters { + #[serde(rename = "filter_by")] + pub filter_by: String, + /// Batch size parameter controls the number of documents that should be deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. 
+ #[serde(rename = "batch_size", skip_serializing_if = "Option::is_none")] + pub batch_size: Option, + #[serde(rename = "ignore_not_found", skip_serializing_if = "Option::is_none")] + pub ignore_not_found: Option, + /// When true, removes all documents from the collection while preserving the collection and its schema. + #[serde(rename = "truncate", skip_serializing_if = "Option::is_none")] + pub truncate: Option, +} + +impl DeleteDocumentsParameters { + pub fn new(filter_by: String) -> DeleteDocumentsParameters { + DeleteDocumentsParameters { + filter_by, + batch_size: None, + ignore_not_found: None, + truncate: None, + } + } +} + diff --git a/typesense_codegen/src/models/export_documents_export_documents_parameters_parameter.rs b/typesense_codegen/src/models/export_documents_export_documents_parameters_parameter.rs new file mode 100644 index 0000000..3d35251 --- /dev/null +++ b/typesense_codegen/src/models/export_documents_export_documents_parameters_parameter.rs @@ -0,0 +1,36 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ExportDocumentsExportDocumentsParametersParameter { + /// Filter conditions for refining your search results. Separate multiple conditions with &&. + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, + /// List of fields from the document to include in the search result + #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] + pub include_fields: Option, + /// List of fields from the document to exclude in the search result + #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] + pub exclude_fields: Option, +} + +impl ExportDocumentsExportDocumentsParametersParameter { + pub fn new() -> ExportDocumentsExportDocumentsParametersParameter { + ExportDocumentsExportDocumentsParametersParameter { + filter_by: None, + include_fields: None, + exclude_fields: None, + } + } +} + diff --git a/typesense_codegen/src/models/export_documents_parameters.rs b/typesense_codegen/src/models/export_documents_parameters.rs new file mode 100644 index 0000000..f6c8384 --- /dev/null +++ b/typesense_codegen/src/models/export_documents_parameters.rs @@ -0,0 +1,36 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ExportDocumentsParameters { + /// Filter conditions for refining your search results. Separate multiple conditions with &&. 
+ #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, + /// List of fields from the document to include in the search result + #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] + pub include_fields: Option, + /// List of fields from the document to exclude in the search result + #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] + pub exclude_fields: Option, +} + +impl ExportDocumentsParameters { + pub fn new() -> ExportDocumentsParameters { + ExportDocumentsParameters { + filter_by: None, + include_fields: None, + exclude_fields: None, + } + } +} + diff --git a/typesense_codegen/src/models/facet_counts_counts_inner.rs b/typesense_codegen/src/models/facet_counts_counts_inner.rs index fbc80d8..0ff18c9 100644 --- a/typesense_codegen/src/models/facet_counts_counts_inner.rs +++ b/typesense_codegen/src/models/facet_counts_counts_inner.rs @@ -17,10 +17,10 @@ pub struct FacetCountsCountsInner { pub count: Option, #[serde(rename = "highlighted", skip_serializing_if = "Option::is_none")] pub highlighted: Option, - #[serde(rename = "parent", skip_serializing_if = "Option::is_none")] - pub parent: Option, #[serde(rename = "value", skip_serializing_if = "Option::is_none")] pub value: Option, + #[serde(rename = "parent", skip_serializing_if = "Option::is_none")] + pub parent: Option, } impl FacetCountsCountsInner { @@ -28,8 +28,8 @@ impl FacetCountsCountsInner { FacetCountsCountsInner { count: None, highlighted: None, - parent: None, value: None, + parent: None, } } } diff --git a/typesense_codegen/src/models/facet_counts_stats.rs b/typesense_codegen/src/models/facet_counts_stats.rs index f90642d..25d71ab 100644 --- a/typesense_codegen/src/models/facet_counts_stats.rs +++ b/typesense_codegen/src/models/facet_counts_stats.rs @@ -13,8 +13,6 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct FacetCountsStats { - #[serde(rename = "avg", skip_serializing_if = "Option::is_none")] - pub avg: Option, #[serde(rename = "max", skip_serializing_if = "Option::is_none")] pub max: Option, #[serde(rename = "min", skip_serializing_if = "Option::is_none")] @@ -23,16 +21,18 @@ pub struct FacetCountsStats { pub sum: Option, #[serde(rename = "total_values", skip_serializing_if = "Option::is_none")] pub total_values: Option, + #[serde(rename = "avg", skip_serializing_if = "Option::is_none")] + pub avg: Option, } impl FacetCountsStats { pub fn new() -> FacetCountsStats { FacetCountsStats { - avg: None, max: None, min: None, sum: None, total_values: None, + avg: None, } } } diff --git a/typesense_codegen/src/models/field.rs b/typesense_codegen/src/models/field.rs index dcf32da..5e260bc 100644 --- a/typesense_codegen/src/models/field.rs +++ b/typesense_codegen/src/models/field.rs @@ -13,76 +13,76 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct Field { - #[serde(rename = "drop", skip_serializing_if = "Option::is_none")] - pub drop: Option, - #[serde(rename = "embed", skip_serializing_if = "Option::is_none")] - pub embed: Option>, + #[serde(rename = "name")] + pub name: String, + #[serde(rename = "type")] + pub r#type: String, + #[serde(rename = "optional", skip_serializing_if = "Option::is_none")] + pub optional: Option, #[serde(rename = "facet", skip_serializing_if = "Option::is_none")] pub facet: Option, #[serde(rename = "index", skip_serializing_if = "Option::is_none")] pub index: Option, - 
#[serde(rename = "infix", skip_serializing_if = "Option::is_none")] - pub infix: Option, #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] pub locale: Option, - #[serde(rename = "name")] - pub name: String, + #[serde(rename = "sort", skip_serializing_if = "Option::is_none")] + pub sort: Option, + #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] + pub infix: Option, + /// Name of a field in another collection that should be linked to this collection so that it can be joined during query. + #[serde(rename = "reference", skip_serializing_if = "Option::is_none")] + pub reference: Option, #[serde(rename = "num_dim", skip_serializing_if = "Option::is_none")] pub num_dim: Option, - #[serde(rename = "optional", skip_serializing_if = "Option::is_none")] - pub optional: Option, + #[serde(rename = "drop", skip_serializing_if = "Option::is_none")] + pub drop: Option, + /// When set to false, the field value will not be stored on disk. Default: true. + #[serde(rename = "store", skip_serializing_if = "Option::is_none")] + pub store: Option, + /// The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. + #[serde(rename = "vec_dist", skip_serializing_if = "Option::is_none")] + pub vec_dist: Option, /// Enables an index optimized for range filtering on numerical fields (e.g. rating:>3.5). Default: false. #[serde(rename = "range_index", skip_serializing_if = "Option::is_none")] pub range_index: Option, - /// Name of a field in another collection that should be linked to this collection so that it can be joined during query. - #[serde(rename = "reference", skip_serializing_if = "Option::is_none")] - pub reference: Option, - #[serde(rename = "sort", skip_serializing_if = "Option::is_none")] - pub sort: Option, /// Values are stemmed before indexing in-memory. Default: false. #[serde(rename = "stem", skip_serializing_if = "Option::is_none")] pub stem: Option, /// Name of the stemming dictionary to use for this field #[serde(rename = "stem_dictionary", skip_serializing_if = "Option::is_none")] pub stem_dictionary: Option, - /// When set to false, the field value will not be stored on disk. Default: true. - #[serde(rename = "store", skip_serializing_if = "Option::is_none")] - pub store: Option, - /// List of symbols or special characters to be indexed. - #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] - pub symbols_to_index: Option>, /// List of symbols or special characters to be used for splitting the text into individual words in addition to space and new-line characters. #[serde(rename = "token_separators", skip_serializing_if = "Option::is_none")] pub token_separators: Option>, - #[serde(rename = "type")] - pub r#type: String, - /// The distance metric to be used for vector search. Default: `cosine`. You can also use `ip` for inner product. - #[serde(rename = "vec_dist", skip_serializing_if = "Option::is_none")] - pub vec_dist: Option, + /// List of symbols or special characters to be indexed. 
+ #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] + pub symbols_to_index: Option>, + #[serde(rename = "embed", skip_serializing_if = "Option::is_none")] + pub embed: Option>, } impl Field { pub fn new(name: String, r#type: String) -> Field { Field { - drop: None, - embed: None, + name, + r#type, + optional: None, facet: None, index: None, - infix: None, locale: None, - name, + sort: None, + infix: None, + reference: None, num_dim: None, - optional: None, + drop: None, + store: None, + vec_dist: None, range_index: None, - reference: None, - sort: None, stem: None, stem_dictionary: None, - store: None, - symbols_to_index: None, token_separators: None, - r#type, - vec_dist: None, + symbols_to_index: None, + embed: None, } } } diff --git a/typesense_codegen/src/models/field_embed_model_config.rs b/typesense_codegen/src/models/field_embed_model_config.rs index 91f9939..479a253 100644 --- a/typesense_codegen/src/models/field_embed_model_config.rs +++ b/typesense_codegen/src/models/field_embed_model_config.rs @@ -13,41 +13,41 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct FieldEmbedModelConfig { - #[serde(rename = "access_token", skip_serializing_if = "Option::is_none")] - pub access_token: Option, + #[serde(rename = "model_name")] + pub model_name: String, #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] pub api_key: Option, + #[serde(rename = "url", skip_serializing_if = "Option::is_none")] + pub url: Option, + #[serde(rename = "access_token", skip_serializing_if = "Option::is_none")] + pub access_token: Option, + #[serde(rename = "refresh_token", skip_serializing_if = "Option::is_none")] + pub refresh_token: Option, #[serde(rename = "client_id", skip_serializing_if = "Option::is_none")] pub client_id: Option, #[serde(rename = "client_secret", skip_serializing_if = "Option::is_none")] pub client_secret: Option, - #[serde(rename = "indexing_prefix", skip_serializing_if = "Option::is_none")] - pub indexing_prefix: Option, - #[serde(rename = "model_name")] - pub model_name: String, #[serde(rename = "project_id", skip_serializing_if = "Option::is_none")] pub project_id: Option, + #[serde(rename = "indexing_prefix", skip_serializing_if = "Option::is_none")] + pub indexing_prefix: Option, #[serde(rename = "query_prefix", skip_serializing_if = "Option::is_none")] pub query_prefix: Option, - #[serde(rename = "refresh_token", skip_serializing_if = "Option::is_none")] - pub refresh_token: Option, - #[serde(rename = "url", skip_serializing_if = "Option::is_none")] - pub url: Option, } impl FieldEmbedModelConfig { pub fn new(model_name: String) -> FieldEmbedModelConfig { FieldEmbedModelConfig { - access_token: None, + model_name, api_key: None, + url: None, + access_token: None, + refresh_token: None, client_id: None, client_secret: None, - indexing_prefix: None, - model_name, project_id: None, + indexing_prefix: None, query_prefix: None, - refresh_token: None, - url: None, } } } diff --git a/typesense_codegen/src/models/import_documents_import_documents_parameters_parameter.rs b/typesense_codegen/src/models/import_documents_import_documents_parameters_parameter.rs new file mode 100644 index 0000000..321d0ed --- /dev/null +++ b/typesense_codegen/src/models/import_documents_import_documents_parameters_parameter.rs @@ -0,0 +1,43 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ImportDocumentsImportDocumentsParametersParameter { + #[serde(rename = "batch_size", skip_serializing_if = "Option::is_none")] + pub batch_size: Option, + /// Returning the id of the imported documents. If you want the import response to return the ingested document's id in the response, you can use the return_id parameter. + #[serde(rename = "return_id", skip_serializing_if = "Option::is_none")] + pub return_id: Option, + #[serde(rename = "remote_embedding_batch_size", skip_serializing_if = "Option::is_none")] + pub remote_embedding_batch_size: Option, + #[serde(rename = "return_doc", skip_serializing_if = "Option::is_none")] + pub return_doc: Option, + #[serde(rename = "action", skip_serializing_if = "Option::is_none")] + pub action: Option, + #[serde(rename = "dirty_values", skip_serializing_if = "Option::is_none")] + pub dirty_values: Option, +} + +impl ImportDocumentsImportDocumentsParametersParameter { + pub fn new() -> ImportDocumentsImportDocumentsParametersParameter { + ImportDocumentsImportDocumentsParametersParameter { + batch_size: None, + return_id: None, + remote_embedding_batch_size: None, + return_doc: None, + action: None, + dirty_values: None, + } + } +} + diff --git a/typesense_codegen/src/models/import_documents_parameters.rs b/typesense_codegen/src/models/import_documents_parameters.rs new file mode 100644 index 0000000..c10e880 --- /dev/null +++ b/typesense_codegen/src/models/import_documents_parameters.rs @@ -0,0 +1,43 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct ImportDocumentsParameters { + #[serde(rename = "batch_size", skip_serializing_if = "Option::is_none")] + pub batch_size: Option, + /// Returning the id of the imported documents. If you want the import response to return the ingested document's id in the response, you can use the return_id parameter. 
+ #[serde(rename = "return_id", skip_serializing_if = "Option::is_none")] + pub return_id: Option, + #[serde(rename = "remote_embedding_batch_size", skip_serializing_if = "Option::is_none")] + pub remote_embedding_batch_size: Option, + #[serde(rename = "return_doc", skip_serializing_if = "Option::is_none")] + pub return_doc: Option, + #[serde(rename = "action", skip_serializing_if = "Option::is_none")] + pub action: Option, + #[serde(rename = "dirty_values", skip_serializing_if = "Option::is_none")] + pub dirty_values: Option, +} + +impl ImportDocumentsParameters { + pub fn new() -> ImportDocumentsParameters { + ImportDocumentsParameters { + batch_size: None, + return_id: None, + remote_embedding_batch_size: None, + return_doc: None, + action: None, + dirty_values: None, + } + } +} + diff --git a/typesense_codegen/src/models/mod.rs b/typesense_codegen/src/models/mod.rs index 876cc6d..4af5098 100644 --- a/typesense_codegen/src/models/mod.rs +++ b/typesense_codegen/src/models/mod.rs @@ -52,6 +52,8 @@ pub mod debug_200_response; pub use self::debug_200_response::Debug200Response; pub mod delete_documents_200_response; pub use self::delete_documents_200_response::DeleteDocuments200Response; +pub mod delete_documents_parameters; +pub use self::delete_documents_parameters::DeleteDocumentsParameters; pub mod delete_stopwords_set_200_response; pub use self::delete_stopwords_set_200_response::DeleteStopwordsSet200Response; pub mod dirty_values; @@ -62,6 +64,8 @@ pub mod drop_tokens_mode; pub use self::drop_tokens_mode::DropTokensMode; pub mod error_response; pub use self::error_response::ErrorResponse; +pub mod export_documents_parameters; +pub use self::export_documents_parameters::ExportDocumentsParameters; pub mod facet_counts; pub use self::facet_counts::FacetCounts; pub mod facet_counts_counts_inner; @@ -76,6 +80,8 @@ pub mod field_embed_model_config; pub use self::field_embed_model_config::FieldEmbedModelConfig; pub mod health_status; pub use self::health_status::HealthStatus; +pub mod import_documents_parameters; +pub use self::import_documents_parameters::ImportDocumentsParameters; pub mod index_action; pub use self::index_action::IndexAction; pub mod list_stemming_dictionaries_200_response; @@ -90,6 +96,14 @@ pub mod multi_search_result_item; pub use self::multi_search_result_item::MultiSearchResultItem; pub mod multi_search_searches_parameter; pub use self::multi_search_searches_parameter::MultiSearchSearchesParameter; +pub mod nl_search_model_base; +pub use self::nl_search_model_base::NlSearchModelBase; +pub mod nl_search_model_create_schema; +pub use self::nl_search_model_create_schema::NlSearchModelCreateSchema; +pub mod nl_search_model_delete_schema; +pub use self::nl_search_model_delete_schema::NlSearchModelDeleteSchema; +pub mod nl_search_model_schema; +pub use self::nl_search_model_schema::NlSearchModelSchema; pub mod preset_delete_schema; pub use self::preset_delete_schema::PresetDeleteSchema; pub mod preset_schema; @@ -162,5 +176,7 @@ pub mod success_status; pub use self::success_status::SuccessStatus; pub mod update_documents_200_response; pub use self::update_documents_200_response::UpdateDocuments200Response; +pub mod update_documents_parameters; +pub use self::update_documents_parameters::UpdateDocumentsParameters; pub mod voice_query_model_collection_config; pub use self::voice_query_model_collection_config::VoiceQueryModelCollectionConfig; diff --git a/typesense_codegen/src/models/multi_search_collection_parameters.rs 
b/typesense_codegen/src/models/multi_search_collection_parameters.rs index ad18f80..0848b8e 100644 --- a/typesense_codegen/src/models/multi_search_collection_parameters.rs +++ b/typesense_codegen/src/models/multi_search_collection_parameters.rs @@ -13,274 +13,274 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct MultiSearchCollectionParameters { - /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] - pub cache_ttl: Option, - /// Enable conversational search. - #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] - pub conversation: Option, - /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. - #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] - pub conversation_id: Option, - /// The Id of Conversation Model to be used. - #[serde(rename = "conversation_model_id", skip_serializing_if = "Option::is_none")] - pub conversation_model_id: Option, - #[serde(rename = "drop_tokens_mode", skip_serializing_if = "Option::is_none")] - pub drop_tokens_mode: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - #[serde(rename = "drop_tokens_threshold", skip_serializing_if = "Option::is_none")] - pub drop_tokens_threshold: Option, - /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] - pub enable_overrides: Option, - /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true - #[serde(rename = "enable_synonyms", skip_serializing_if = "Option::is_none")] - pub enable_synonyms: Option, - /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. - #[serde(rename = "enable_typos_for_alpha_numerical_tokens", skip_serializing_if = "Option::is_none")] - pub enable_typos_for_alpha_numerical_tokens: Option, - /// Make Typesense disable typos for numerical tokens. - #[serde(rename = "enable_typos_for_numerical_tokens", skip_serializing_if = "Option::is_none")] - pub enable_typos_for_numerical_tokens: Option, - /// List of fields from the document to exclude in the search result - #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, - /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] - pub exhaustive_search: Option, - /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] - pub facet_by: Option, - /// Facet values that are returned can now be filtered via this parameter. 
The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". - #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] - pub facet_query: Option, - /// Comma separated string of nested facet fields whose parent object should be returned in facet response. - #[serde(rename = "facet_return_parent", skip_serializing_if = "Option::is_none")] - pub facet_return_parent: Option, - /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). - #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] - pub facet_strategy: Option, - /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false - #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] - pub filter_curated_hits: Option, - /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. - #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] - pub group_by: Option, - /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 - #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] - pub group_limit: Option, - /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true - #[serde(rename = "group_missing_values", skip_serializing_if = "Option::is_none")] - pub group_missing_values: Option, - /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] - pub hidden_hits: Option, - /// The number of tokens that should surround the highlighted text on each side. Default: 4 - #[serde(rename = "highlight_affix_num_tokens", skip_serializing_if = "Option::is_none")] - pub highlight_affix_num_tokens: Option, - /// The end tag used for the highlighted snippets. Default: `` - #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] - pub highlight_end_tag: Option, - /// A list of custom fields that must be highlighted even if you don't query for them - #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] - pub highlight_fields: Option, - /// List of fields which should be highlighted fully without snippeting - #[serde(rename = "highlight_full_fields", skip_serializing_if = "Option::is_none")] - pub highlight_full_fields: Option, - /// The start tag used for the highlighted snippets. 
Default: `` - #[serde(rename = "highlight_start_tag", skip_serializing_if = "Option::is_none")] - pub highlight_start_tag: Option, - /// List of fields from the document to include in the search result - #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, + /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + #[serde(rename = "q", skip_serializing_if = "Option::is_none")] + pub q: Option, + /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] + pub query_by: Option, + /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] + pub query_by_weights: Option, + /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] + pub text_match_type: Option, + /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] + pub prefix: Option, /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] pub infix: Option, - /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] - pub limit: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_prefix", skip_serializing_if = "Option::is_none")] pub max_extra_prefix: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_suffix", skip_serializing_if = "Option::is_none")] pub max_extra_suffix: Option, + /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. 
+ #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, + /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] + pub sort_by: Option, + /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] + pub facet_by: Option, /// Maximum number of facet values to be returned. #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] pub max_facet_values: Option, - /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] - pub min_len_1typo: Option, - /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] - pub min_len_2typo: Option, + /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". + #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] + pub facet_query: Option, /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 #[serde(rename = "num_typos", skip_serializing_if = "Option::is_none")] pub num_typos: Option, - /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] - pub offset: Option, - /// Comma separated list of tags to trigger the curations rules that match the tags. - #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] - pub override_tags: Option, /// Results from this specific page number would be fetched. #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, /// Number of results to fetch per page. Default: 10 #[serde(rename = "per_page", skip_serializing_if = "Option::is_none")] pub per_page: Option, + /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] + pub offset: Option, + /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] + pub group_by: Option, + /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. 
Default: 3 + #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] + pub group_limit: Option, + /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + #[serde(rename = "group_missing_values", skip_serializing_if = "Option::is_none")] + pub group_missing_values: Option, + /// List of fields from the document to include in the search result + #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] + pub include_fields: Option, + /// List of fields from the document to exclude in the search result + #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] + pub exclude_fields: Option, + /// List of fields which should be highlighted fully without snippeting + #[serde(rename = "highlight_full_fields", skip_serializing_if = "Option::is_none")] + pub highlight_full_fields: Option, + /// The number of tokens that should surround the highlighted text on each side. Default: 4 + #[serde(rename = "highlight_affix_num_tokens", skip_serializing_if = "Option::is_none")] + pub highlight_affix_num_tokens: Option, + /// The start tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_start_tag", skip_serializing_if = "Option::is_none")] + pub highlight_start_tag: Option, + /// The end tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] + pub highlight_end_tag: Option, + /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] + pub snippet_threshold: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + #[serde(rename = "drop_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub drop_tokens_threshold: Option, + #[serde(rename = "drop_tokens_mode", skip_serializing_if = "Option::is_none")] + pub drop_tokens_mode: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + #[serde(rename = "typo_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub typo_tokens_threshold: Option, + /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + #[serde(rename = "enable_typos_for_alpha_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_alpha_numerical_tokens: Option, + /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. 
Default: true + #[serde(rename = "enable_synonyms", skip_serializing_if = "Option::is_none")] + pub enable_synonyms: Option, + /// Allow synonym resolution on word prefixes in the query. Default: false + #[serde(rename = "synonym_prefix", skip_serializing_if = "Option::is_none")] + pub synonym_prefix: Option, + /// Allow synonym resolution on typo-corrected words in the query. Default: 0 + #[serde(rename = "synonym_num_typos", skip_serializing_if = "Option::is_none")] + pub synonym_num_typos: Option, /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "pinned_hits", skip_serializing_if = "Option::is_none")] pub pinned_hits: Option, + /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] + pub hidden_hits: Option, + /// Comma separated list of tags to trigger the curations rules that match the tags. + #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] + pub override_tags: Option, + /// A list of custom fields that must be highlighted even if you don't query for them + #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] + pub highlight_fields: Option, /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same #[serde(rename = "pre_segmented_query", skip_serializing_if = "Option::is_none")] pub pre_segmented_query: Option, - /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. #[serde(rename = "preset", skip_serializing_if = "Option::is_none")] pub preset: Option, + /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] + pub enable_overrides: Option, /// Set this parameter to true to ensure that an exact match is ranked above the others #[serde(rename = "prioritize_exact_match", skip_serializing_if = "Option::is_none")] pub prioritize_exact_match: Option, - /// Make Typesense prioritize documents where the query words appear in more number of fields. 
- #[serde(rename = "prioritize_num_matching_fields", skip_serializing_if = "Option::is_none")] - pub prioritize_num_matching_fields: Option, /// Make Typesense prioritize documents where the query words appear earlier in the text. #[serde(rename = "prioritize_token_position", skip_serializing_if = "Option::is_none")] pub prioritize_token_position: Option, - /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - #[serde(rename = "q", skip_serializing_if = "Option::is_none")] - pub q: Option, - /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] - pub query_by: Option, - /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. - #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] - pub query_by_weights: Option, - /// Number of times to retry fetching remote embeddings. - #[serde(rename = "remote_embedding_num_tries", skip_serializing_if = "Option::is_none")] - pub remote_embedding_num_tries: Option, - /// Timeout (in milliseconds) for fetching remote embeddings. - #[serde(rename = "remote_embedding_timeout_ms", skip_serializing_if = "Option::is_none")] - pub remote_embedding_timeout_ms: Option, + /// Make Typesense prioritize documents where the query words appear in more number of fields. + #[serde(rename = "prioritize_num_matching_fields", skip_serializing_if = "Option::is_none")] + pub prioritize_num_matching_fields: Option, + /// Make Typesense disable typos for numerical tokens. + #[serde(rename = "enable_typos_for_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_numerical_tokens: Option, + /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] + pub exhaustive_search: Option, /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. #[serde(rename = "search_cutoff_ms", skip_serializing_if = "Option::is_none")] pub search_cutoff_ms: Option, - /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] - pub snippet_threshold: Option, - /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` - #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, - /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. 
- #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] - pub stopwords: Option, - /// Allow synonym resolution on typo-corrected words in the query. Default: 0 - #[serde(rename = "synonym_num_typos", skip_serializing_if = "Option::is_none")] - pub synonym_num_typos: Option, - /// Allow synonym resolution on word prefixes in the query. Default: false - #[serde(rename = "synonym_prefix", skip_serializing_if = "Option::is_none")] - pub synonym_prefix: Option, - /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. - #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] - pub text_match_type: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - #[serde(rename = "typo_tokens_threshold", skip_serializing_if = "Option::is_none")] - pub typo_tokens_threshold: Option, /// Enable server side caching of search query results. By default, caching is disabled. #[serde(rename = "use_cache", skip_serializing_if = "Option::is_none")] pub use_cache: Option, + /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] + pub cache_ttl: Option, + /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] + pub min_len_1typo: Option, + /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] + pub min_len_2typo: Option, /// Vector query expression for fetching documents \"closest\" to a given query/document vector. #[serde(rename = "vector_query", skip_serializing_if = "Option::is_none")] pub vector_query: Option, + /// Timeout (in milliseconds) for fetching remote embeddings. + #[serde(rename = "remote_embedding_timeout_ms", skip_serializing_if = "Option::is_none")] + pub remote_embedding_timeout_ms: Option, + /// Number of times to retry fetching remote embeddings. + #[serde(rename = "remote_embedding_num_tries", skip_serializing_if = "Option::is_none")] + pub remote_embedding_num_tries: Option, + /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] + pub facet_strategy: Option, + /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] + pub stopwords: Option, + /// Comma separated string of nested facet fields whose parent object should be returned in facet response. + #[serde(rename = "facet_return_parent", skip_serializing_if = "Option::is_none")] + pub facet_return_parent: Option, /// The base64 encoded audio file in 16 khz 16-bit WAV format. #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] pub voice_query: Option, + /// Enable conversational search. 
+ #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option, + /// The Id of Conversation Model to be used. + #[serde(rename = "conversation_model_id", skip_serializing_if = "Option::is_none")] + pub conversation_model_id: Option, + /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] + pub conversation_id: Option, /// The collection to search in. #[serde(rename = "collection", skip_serializing_if = "Option::is_none")] pub collection: Option, - /// When true, computes both text match and vector distance scores for all matches in hybrid search. Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. - #[serde(rename = "rerank_hybrid_matches", skip_serializing_if = "Option::is_none")] - pub rerank_hybrid_matches: Option, /// A separate search API key for each search within a multi_search request #[serde(rename = "x-typesense-api-key", skip_serializing_if = "Option::is_none")] pub x_typesense_api_key: Option, + /// When true, computes both text match and vector distance scores for all matches in hybrid search. Documents found only through keyword search will get a vector distance score, and documents found only through vector search will get a text match score. + #[serde(rename = "rerank_hybrid_matches", skip_serializing_if = "Option::is_none")] + pub rerank_hybrid_matches: Option, } impl MultiSearchCollectionParameters { pub fn new() -> MultiSearchCollectionParameters { MultiSearchCollectionParameters { - cache_ttl: None, - conversation: None, - conversation_id: None, - conversation_model_id: None, - drop_tokens_mode: None, - drop_tokens_threshold: None, - enable_overrides: None, - enable_synonyms: None, - enable_typos_for_alpha_numerical_tokens: None, - enable_typos_for_numerical_tokens: None, - exclude_fields: None, - exhaustive_search: None, - facet_by: None, - facet_query: None, - facet_return_parent: None, - facet_strategy: None, - filter_by: None, - filter_curated_hits: None, - group_by: None, - group_limit: None, - group_missing_values: None, - hidden_hits: None, - highlight_affix_num_tokens: None, - highlight_end_tag: None, - highlight_fields: None, - highlight_full_fields: None, - highlight_start_tag: None, - include_fields: None, + q: None, + query_by: None, + query_by_weights: None, + text_match_type: None, + prefix: None, infix: None, - limit: None, max_extra_prefix: None, max_extra_suffix: None, + filter_by: None, + sort_by: None, + facet_by: None, max_facet_values: None, - min_len_1typo: None, - min_len_2typo: None, + facet_query: None, num_typos: None, - offset: None, - override_tags: None, page: None, per_page: None, + limit: None, + offset: None, + group_by: None, + group_limit: None, + group_missing_values: None, + include_fields: None, + exclude_fields: None, + highlight_full_fields: None, + highlight_affix_num_tokens: None, + highlight_start_tag: None, + highlight_end_tag: None, + snippet_threshold: None, + drop_tokens_threshold: None, + drop_tokens_mode: None, + typo_tokens_threshold: None, + enable_typos_for_alpha_numerical_tokens: None, + filter_curated_hits: None, + enable_synonyms: None, + synonym_prefix: None, + synonym_num_typos: None, pinned_hits: None, + hidden_hits: None, + override_tags: None, + highlight_fields: None, pre_segmented_query: None, - prefix: None, 
preset: None, + enable_overrides: None, prioritize_exact_match: None, - prioritize_num_matching_fields: None, prioritize_token_position: None, - q: None, - query_by: None, - query_by_weights: None, - remote_embedding_num_tries: None, - remote_embedding_timeout_ms: None, + prioritize_num_matching_fields: None, + enable_typos_for_numerical_tokens: None, + exhaustive_search: None, search_cutoff_ms: None, - snippet_threshold: None, - sort_by: None, - stopwords: None, - synonym_num_typos: None, - synonym_prefix: None, - text_match_type: None, - typo_tokens_threshold: None, use_cache: None, + cache_ttl: None, + min_len_1typo: None, + min_len_2typo: None, vector_query: None, + remote_embedding_timeout_ms: None, + remote_embedding_num_tries: None, + facet_strategy: None, + stopwords: None, + facet_return_parent: None, voice_query: None, + conversation: None, + conversation_model_id: None, + conversation_id: None, collection: None, - rerank_hybrid_matches: None, x_typesense_api_key: None, + rerank_hybrid_matches: None, } } } diff --git a/typesense_codegen/src/models/multi_search_parameters.rs b/typesense_codegen/src/models/multi_search_parameters.rs index 6de9b21..1ec2049 100644 --- a/typesense_codegen/src/models/multi_search_parameters.rs +++ b/typesense_codegen/src/models/multi_search_parameters.rs @@ -14,263 +14,263 @@ use serde::{Deserialize, Serialize}; /// MultiSearchParameters : Parameters for the multi search API. #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct MultiSearchParameters { - /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. - #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] - pub cache_ttl: Option, - /// Enable conversational search. - #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] - pub conversation: Option, - /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. - #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] - pub conversation_id: Option, - /// The Id of Conversation Model to be used. - #[serde(rename = "conversation_model_id", skip_serializing_if = "Option::is_none")] - pub conversation_model_id: Option, - #[serde(rename = "drop_tokens_mode", skip_serializing_if = "Option::is_none")] - pub drop_tokens_mode: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - #[serde(rename = "drop_tokens_threshold", skip_serializing_if = "Option::is_none")] - pub drop_tokens_threshold: Option, - /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] - pub enable_overrides: Option, - /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true - #[serde(rename = "enable_synonyms", skip_serializing_if = "Option::is_none")] - pub enable_synonyms: Option, - /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. 
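// Illustrative sketch (not part of the generated diff): issuing a federated multi-search with
// the reordered models. MultiSearchSearchesParameter appears later in this patch; the field
// types shown (Option<String>, Option<bool>) and the derived Default impls are assumed from
// the usual openapi-generator output rather than confirmed here.
use typesense_codegen::models::{MultiSearchCollectionParameters, MultiSearchSearchesParameter};

fn build_example_request() -> MultiSearchSearchesParameter {
    // One per-collection search; every unset field falls back to Default (all None).
    let products = MultiSearchCollectionParameters {
        collection: Some("products".to_string()),
        q: Some("running shoes".to_string()),
        query_by: Some("name,description".to_string()),
        ..Default::default()
    };
    let brands = MultiSearchCollectionParameters {
        collection: Some("brands".to_string()),
        q: Some("running shoes".to_string()),
        query_by: Some("name".to_string()),
        ..Default::default()
    };
    // `union: Some(true)` merges both result sets into a single ordered list of hits.
    let mut request = MultiSearchSearchesParameter::new(vec![products, brands]);
    request.union = Some(true);
    request
}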
- #[serde(rename = "enable_typos_for_alpha_numerical_tokens", skip_serializing_if = "Option::is_none")] - pub enable_typos_for_alpha_numerical_tokens: Option, - /// Make Typesense disable typos for numerical tokens. - #[serde(rename = "enable_typos_for_numerical_tokens", skip_serializing_if = "Option::is_none")] - pub enable_typos_for_numerical_tokens: Option, - /// List of fields from the document to exclude in the search result - #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, - /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] - pub exhaustive_search: Option, - /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] - pub facet_by: Option, - /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". - #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] - pub facet_query: Option, - /// Comma separated string of nested facet fields whose parent object should be returned in facet response. - #[serde(rename = "facet_return_parent", skip_serializing_if = "Option::is_none")] - pub facet_return_parent: Option, - /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). - #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] - pub facet_strategy: Option, - /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false - #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] - pub filter_curated_hits: Option, - /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. - #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] - pub group_by: Option, - /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 - #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] - pub group_limit: Option, - /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true - #[serde(rename = "group_missing_values", skip_serializing_if = "Option::is_none")] - pub group_missing_values: Option, - /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. 
Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] - pub hidden_hits: Option, - /// The number of tokens that should surround the highlighted text on each side. Default: 4 - #[serde(rename = "highlight_affix_num_tokens", skip_serializing_if = "Option::is_none")] - pub highlight_affix_num_tokens: Option, - /// The end tag used for the highlighted snippets. Default: `` - #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] - pub highlight_end_tag: Option, - /// A list of custom fields that must be highlighted even if you don't query for them - #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] - pub highlight_fields: Option, - /// List of fields which should be highlighted fully without snippeting - #[serde(rename = "highlight_full_fields", skip_serializing_if = "Option::is_none")] - pub highlight_full_fields: Option, - /// The start tag used for the highlighted snippets. Default: `` - #[serde(rename = "highlight_start_tag", skip_serializing_if = "Option::is_none")] - pub highlight_start_tag: Option, - /// List of fields from the document to include in the search result - #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, + /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + #[serde(rename = "q", skip_serializing_if = "Option::is_none")] + pub q: Option, + /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] + pub query_by: Option, + /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] + pub query_by_weights: Option, + /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] + pub text_match_type: Option, + /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] + pub prefix: Option, /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] pub infix: Option, - /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. 
- #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] - pub limit: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_prefix", skip_serializing_if = "Option::is_none")] pub max_extra_prefix: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_suffix", skip_serializing_if = "Option::is_none")] pub max_extra_suffix: Option, + /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, + /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] + pub sort_by: Option, + /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] + pub facet_by: Option, /// Maximum number of facet values to be returned. #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] pub max_facet_values: Option, - /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] - pub min_len_1typo: Option, - /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] - pub min_len_2typo: Option, + /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". + #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] + pub facet_query: Option, /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 #[serde(rename = "num_typos", skip_serializing_if = "Option::is_none")] pub num_typos: Option, - /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] - pub offset: Option, - /// Comma separated list of tags to trigger the curations rules that match the tags. 
- #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] - pub override_tags: Option, /// Results from this specific page number would be fetched. #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, /// Number of results to fetch per page. Default: 10 #[serde(rename = "per_page", skip_serializing_if = "Option::is_none")] pub per_page: Option, + /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] + pub offset: Option, + /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] + pub group_by: Option, + /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] + pub group_limit: Option, + /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + #[serde(rename = "group_missing_values", skip_serializing_if = "Option::is_none")] + pub group_missing_values: Option, + /// List of fields from the document to include in the search result + #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] + pub include_fields: Option, + /// List of fields from the document to exclude in the search result + #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] + pub exclude_fields: Option, + /// List of fields which should be highlighted fully without snippeting + #[serde(rename = "highlight_full_fields", skip_serializing_if = "Option::is_none")] + pub highlight_full_fields: Option, + /// The number of tokens that should surround the highlighted text on each side. Default: 4 + #[serde(rename = "highlight_affix_num_tokens", skip_serializing_if = "Option::is_none")] + pub highlight_affix_num_tokens: Option, + /// The start tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_start_tag", skip_serializing_if = "Option::is_none")] + pub highlight_start_tag: Option, + /// The end tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] + pub highlight_end_tag: Option, + /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] + pub snippet_threshold: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. 
Default: 10 + #[serde(rename = "drop_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub drop_tokens_threshold: Option, + #[serde(rename = "drop_tokens_mode", skip_serializing_if = "Option::is_none")] + pub drop_tokens_mode: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + #[serde(rename = "typo_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub typo_tokens_threshold: Option, + /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + #[serde(rename = "enable_typos_for_alpha_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_alpha_numerical_tokens: Option, + /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + #[serde(rename = "enable_synonyms", skip_serializing_if = "Option::is_none")] + pub enable_synonyms: Option, + /// Allow synonym resolution on word prefixes in the query. Default: false + #[serde(rename = "synonym_prefix", skip_serializing_if = "Option::is_none")] + pub synonym_prefix: Option, + /// Allow synonym resolution on typo-corrected words in the query. Default: 0 + #[serde(rename = "synonym_num_typos", skip_serializing_if = "Option::is_none")] + pub synonym_num_typos: Option, /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "pinned_hits", skip_serializing_if = "Option::is_none")] pub pinned_hits: Option, + /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] + pub hidden_hits: Option, + /// Comma separated list of tags to trigger the curations rules that match the tags. + #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] + pub override_tags: Option, + /// A list of custom fields that must be highlighted even if you don't query for them + #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] + pub highlight_fields: Option, /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. 
Set this parameter to true to do the same #[serde(rename = "pre_segmented_query", skip_serializing_if = "Option::is_none")] pub pre_segmented_query: Option, - /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. #[serde(rename = "preset", skip_serializing_if = "Option::is_none")] pub preset: Option, + /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] + pub enable_overrides: Option, /// Set this parameter to true to ensure that an exact match is ranked above the others #[serde(rename = "prioritize_exact_match", skip_serializing_if = "Option::is_none")] pub prioritize_exact_match: Option, - /// Make Typesense prioritize documents where the query words appear in more number of fields. - #[serde(rename = "prioritize_num_matching_fields", skip_serializing_if = "Option::is_none")] - pub prioritize_num_matching_fields: Option, /// Make Typesense prioritize documents where the query words appear earlier in the text. #[serde(rename = "prioritize_token_position", skip_serializing_if = "Option::is_none")] pub prioritize_token_position: Option, - /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - #[serde(rename = "q", skip_serializing_if = "Option::is_none")] - pub q: Option, - /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] - pub query_by: Option, - /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. - #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] - pub query_by_weights: Option, - /// Number of times to retry fetching remote embeddings. - #[serde(rename = "remote_embedding_num_tries", skip_serializing_if = "Option::is_none")] - pub remote_embedding_num_tries: Option, - /// Timeout (in milliseconds) for fetching remote embeddings. - #[serde(rename = "remote_embedding_timeout_ms", skip_serializing_if = "Option::is_none")] - pub remote_embedding_timeout_ms: Option, + /// Make Typesense prioritize documents where the query words appear in more number of fields. + #[serde(rename = "prioritize_num_matching_fields", skip_serializing_if = "Option::is_none")] + pub prioritize_num_matching_fields: Option, + /// Make Typesense disable typos for numerical tokens. + #[serde(rename = "enable_typos_for_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_numerical_tokens: Option, + /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). 
+ #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] + pub exhaustive_search: Option, /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. #[serde(rename = "search_cutoff_ms", skip_serializing_if = "Option::is_none")] pub search_cutoff_ms: Option, - /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] - pub snippet_threshold: Option, - /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` - #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, - /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. - #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] - pub stopwords: Option, - /// Allow synonym resolution on typo-corrected words in the query. Default: 0 - #[serde(rename = "synonym_num_typos", skip_serializing_if = "Option::is_none")] - pub synonym_num_typos: Option, - /// Allow synonym resolution on word prefixes in the query. Default: false - #[serde(rename = "synonym_prefix", skip_serializing_if = "Option::is_none")] - pub synonym_prefix: Option, - /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. - #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] - pub text_match_type: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - #[serde(rename = "typo_tokens_threshold", skip_serializing_if = "Option::is_none")] - pub typo_tokens_threshold: Option, /// Enable server side caching of search query results. By default, caching is disabled. #[serde(rename = "use_cache", skip_serializing_if = "Option::is_none")] pub use_cache: Option, + /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] + pub cache_ttl: Option, + /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] + pub min_len_1typo: Option, + /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] + pub min_len_2typo: Option, /// Vector query expression for fetching documents \"closest\" to a given query/document vector. #[serde(rename = "vector_query", skip_serializing_if = "Option::is_none")] pub vector_query: Option, + /// Timeout (in milliseconds) for fetching remote embeddings. 
+ #[serde(rename = "remote_embedding_timeout_ms", skip_serializing_if = "Option::is_none")] + pub remote_embedding_timeout_ms: Option, + /// Number of times to retry fetching remote embeddings. + #[serde(rename = "remote_embedding_num_tries", skip_serializing_if = "Option::is_none")] + pub remote_embedding_num_tries: Option, + /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] + pub facet_strategy: Option, + /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] + pub stopwords: Option, + /// Comma separated string of nested facet fields whose parent object should be returned in facet response. + #[serde(rename = "facet_return_parent", skip_serializing_if = "Option::is_none")] + pub facet_return_parent: Option, /// The base64 encoded audio file in 16 khz 16-bit WAV format. #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] pub voice_query: Option, + /// Enable conversational search. + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option, + /// The Id of Conversation Model to be used. + #[serde(rename = "conversation_model_id", skip_serializing_if = "Option::is_none")] + pub conversation_model_id: Option, + /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] + pub conversation_id: Option, } impl MultiSearchParameters { /// Parameters for the multi search API. 
pub fn new() -> MultiSearchParameters { MultiSearchParameters { - cache_ttl: None, - conversation: None, - conversation_id: None, - conversation_model_id: None, - drop_tokens_mode: None, - drop_tokens_threshold: None, - enable_overrides: None, - enable_synonyms: None, - enable_typos_for_alpha_numerical_tokens: None, - enable_typos_for_numerical_tokens: None, - exclude_fields: None, - exhaustive_search: None, - facet_by: None, - facet_query: None, - facet_return_parent: None, - facet_strategy: None, - filter_by: None, - filter_curated_hits: None, - group_by: None, - group_limit: None, - group_missing_values: None, - hidden_hits: None, - highlight_affix_num_tokens: None, - highlight_end_tag: None, - highlight_fields: None, - highlight_full_fields: None, - highlight_start_tag: None, - include_fields: None, + q: None, + query_by: None, + query_by_weights: None, + text_match_type: None, + prefix: None, infix: None, - limit: None, max_extra_prefix: None, max_extra_suffix: None, + filter_by: None, + sort_by: None, + facet_by: None, max_facet_values: None, - min_len_1typo: None, - min_len_2typo: None, + facet_query: None, num_typos: None, - offset: None, - override_tags: None, page: None, per_page: None, + limit: None, + offset: None, + group_by: None, + group_limit: None, + group_missing_values: None, + include_fields: None, + exclude_fields: None, + highlight_full_fields: None, + highlight_affix_num_tokens: None, + highlight_start_tag: None, + highlight_end_tag: None, + snippet_threshold: None, + drop_tokens_threshold: None, + drop_tokens_mode: None, + typo_tokens_threshold: None, + enable_typos_for_alpha_numerical_tokens: None, + filter_curated_hits: None, + enable_synonyms: None, + synonym_prefix: None, + synonym_num_typos: None, pinned_hits: None, + hidden_hits: None, + override_tags: None, + highlight_fields: None, pre_segmented_query: None, - prefix: None, preset: None, + enable_overrides: None, prioritize_exact_match: None, - prioritize_num_matching_fields: None, prioritize_token_position: None, - q: None, - query_by: None, - query_by_weights: None, - remote_embedding_num_tries: None, - remote_embedding_timeout_ms: None, + prioritize_num_matching_fields: None, + enable_typos_for_numerical_tokens: None, + exhaustive_search: None, search_cutoff_ms: None, - snippet_threshold: None, - sort_by: None, - stopwords: None, - synonym_num_typos: None, - synonym_prefix: None, - text_match_type: None, - typo_tokens_threshold: None, use_cache: None, + cache_ttl: None, + min_len_1typo: None, + min_len_2typo: None, vector_query: None, + remote_embedding_timeout_ms: None, + remote_embedding_num_tries: None, + facet_strategy: None, + stopwords: None, + facet_return_parent: None, voice_query: None, + conversation: None, + conversation_model_id: None, + conversation_id: None, } } } diff --git a/typesense_codegen/src/models/multi_search_result.rs b/typesense_codegen/src/models/multi_search_result.rs index f1cc224..aa6038e 100644 --- a/typesense_codegen/src/models/multi_search_result.rs +++ b/typesense_codegen/src/models/multi_search_result.rs @@ -13,17 +13,17 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct MultiSearchResult { - #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] - pub conversation: Option>, #[serde(rename = "results")] pub results: Vec, + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option>, } impl MultiSearchResult { pub fn new(results: Vec) -> 
MultiSearchResult { MultiSearchResult { - conversation: None, results, + conversation: None, } } } diff --git a/typesense_codegen/src/models/multi_search_result_item.rs b/typesense_codegen/src/models/multi_search_result_item.rs index 8738365..c09620a 100644 --- a/typesense_codegen/src/models/multi_search_result_item.rs +++ b/typesense_codegen/src/models/multi_search_result_item.rs @@ -13,8 +13,6 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct MultiSearchResultItem { - #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] - pub conversation: Option>, #[serde(rename = "facet_counts", skip_serializing_if = "Option::is_none")] pub facet_counts: Option>, /// The number of documents found @@ -22,25 +20,27 @@ pub struct MultiSearchResultItem { pub found: Option, #[serde(rename = "found_docs", skip_serializing_if = "Option::is_none")] pub found_docs: Option, - #[serde(rename = "grouped_hits", skip_serializing_if = "Option::is_none")] - pub grouped_hits: Option>, - /// The documents that matched the search query - #[serde(rename = "hits", skip_serializing_if = "Option::is_none")] - pub hits: Option>, + /// The number of milliseconds the search took + #[serde(rename = "search_time_ms", skip_serializing_if = "Option::is_none")] + pub search_time_ms: Option, /// The total number of documents in the collection #[serde(rename = "out_of", skip_serializing_if = "Option::is_none")] pub out_of: Option, + /// Whether the search was cut off + #[serde(rename = "search_cutoff", skip_serializing_if = "Option::is_none")] + pub search_cutoff: Option, /// The search result page number #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, + #[serde(rename = "grouped_hits", skip_serializing_if = "Option::is_none")] + pub grouped_hits: Option>, + /// The documents that matched the search query + #[serde(rename = "hits", skip_serializing_if = "Option::is_none")] + pub hits: Option>, #[serde(rename = "request_params", skip_serializing_if = "Option::is_none")] pub request_params: Option>, - /// Whether the search was cut off - #[serde(rename = "search_cutoff", skip_serializing_if = "Option::is_none")] - pub search_cutoff: Option, - /// The number of milliseconds the search took - #[serde(rename = "search_time_ms", skip_serializing_if = "Option::is_none")] - pub search_time_ms: Option, + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option>, /// HTTP error code #[serde(rename = "code", skip_serializing_if = "Option::is_none")] pub code: Option, @@ -52,17 +52,17 @@ pub struct MultiSearchResultItem { impl MultiSearchResultItem { pub fn new() -> MultiSearchResultItem { MultiSearchResultItem { - conversation: None, facet_counts: None, found: None, found_docs: None, - grouped_hits: None, - hits: None, + search_time_ms: None, out_of: None, + search_cutoff: None, page: None, + grouped_hits: None, + hits: None, request_params: None, - search_cutoff: None, - search_time_ms: None, + conversation: None, code: None, error: None, } diff --git a/typesense_codegen/src/models/multi_search_searches_parameter.rs b/typesense_codegen/src/models/multi_search_searches_parameter.rs index 3c3fff2..7d165be 100644 --- a/typesense_codegen/src/models/multi_search_searches_parameter.rs +++ b/typesense_codegen/src/models/multi_search_searches_parameter.rs @@ -13,18 +13,18 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub 
struct MultiSearchSearchesParameter { - #[serde(rename = "searches")] - pub searches: Vec, /// When true, merges the search results from each search query into a single ordered set of hits. #[serde(rename = "union", skip_serializing_if = "Option::is_none")] pub union: Option, + #[serde(rename = "searches")] + pub searches: Vec, } impl MultiSearchSearchesParameter { pub fn new(searches: Vec) -> MultiSearchSearchesParameter { MultiSearchSearchesParameter { - searches, union: None, + searches, } } } diff --git a/typesense_codegen/src/models/nl_search_model_base.rs b/typesense_codegen/src/models/nl_search_model_base.rs new file mode 100644 index 0000000..bec4058 --- /dev/null +++ b/typesense_codegen/src/models/nl_search_model_base.rs @@ -0,0 +1,96 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct NlSearchModelBase { + /// Name of the NL model to use + #[serde(rename = "model_name", skip_serializing_if = "Option::is_none")] + pub model_name: Option, + /// API key for the NL model service + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, + /// Custom API URL for the NL model service + #[serde(rename = "api_url", skip_serializing_if = "Option::is_none")] + pub api_url: Option, + /// Maximum number of bytes to process + #[serde(rename = "max_bytes", skip_serializing_if = "Option::is_none")] + pub max_bytes: Option, + /// Temperature parameter for the NL model + #[serde(rename = "temperature", skip_serializing_if = "Option::is_none")] + pub temperature: Option, + /// System prompt for the NL model + #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] + pub system_prompt: Option, + /// Top-p parameter for the NL model (Google-specific) + #[serde(rename = "top_p", skip_serializing_if = "Option::is_none")] + pub top_p: Option, + /// Top-k parameter for the NL model (Google-specific) + #[serde(rename = "top_k", skip_serializing_if = "Option::is_none")] + pub top_k: Option, + /// Stop sequences for the NL model (Google-specific) + #[serde(rename = "stop_sequences", skip_serializing_if = "Option::is_none")] + pub stop_sequences: Option>, + /// API version for the NL model service + #[serde(rename = "api_version", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + /// Project ID for GCP Vertex AI + #[serde(rename = "project_id", skip_serializing_if = "Option::is_none")] + pub project_id: Option, + /// Access token for GCP Vertex AI + #[serde(rename = "access_token", skip_serializing_if = "Option::is_none")] + pub access_token: Option, + /// Refresh token for GCP Vertex AI + #[serde(rename = "refresh_token", skip_serializing_if = "Option::is_none")] + pub refresh_token: Option, + /// Client ID for GCP Vertex AI + #[serde(rename = "client_id", skip_serializing_if = "Option::is_none")] + pub client_id: Option, + /// Client secret for GCP Vertex AI + #[serde(rename = "client_secret", skip_serializing_if = "Option::is_none")] + pub client_secret: Option, + /// Region for GCP Vertex AI + #[serde(rename = "region", skip_serializing_if = "Option::is_none")] + pub region: Option, + /// Maximum output tokens for GCP Vertex AI + #[serde(rename = "max_output_tokens", skip_serializing_if = "Option::is_none")] + pub 
max_output_tokens: Option, + /// Account ID for Cloudflare-specific models + #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] + pub account_id: Option, +} + +impl NlSearchModelBase { + pub fn new() -> NlSearchModelBase { + NlSearchModelBase { + model_name: None, + api_key: None, + api_url: None, + max_bytes: None, + temperature: None, + system_prompt: None, + top_p: None, + top_k: None, + stop_sequences: None, + api_version: None, + project_id: None, + access_token: None, + refresh_token: None, + client_id: None, + client_secret: None, + region: None, + max_output_tokens: None, + account_id: None, + } + } +} + diff --git a/typesense_codegen/src/models/nl_search_model_create_schema.rs b/typesense_codegen/src/models/nl_search_model_create_schema.rs new file mode 100644 index 0000000..383b456 --- /dev/null +++ b/typesense_codegen/src/models/nl_search_model_create_schema.rs @@ -0,0 +1,100 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct NlSearchModelCreateSchema { + /// Name of the NL model to use + #[serde(rename = "model_name", skip_serializing_if = "Option::is_none")] + pub model_name: Option, + /// API key for the NL model service + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, + /// Custom API URL for the NL model service + #[serde(rename = "api_url", skip_serializing_if = "Option::is_none")] + pub api_url: Option, + /// Maximum number of bytes to process + #[serde(rename = "max_bytes", skip_serializing_if = "Option::is_none")] + pub max_bytes: Option, + /// Temperature parameter for the NL model + #[serde(rename = "temperature", skip_serializing_if = "Option::is_none")] + pub temperature: Option, + /// System prompt for the NL model + #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] + pub system_prompt: Option, + /// Top-p parameter for the NL model (Google-specific) + #[serde(rename = "top_p", skip_serializing_if = "Option::is_none")] + pub top_p: Option, + /// Top-k parameter for the NL model (Google-specific) + #[serde(rename = "top_k", skip_serializing_if = "Option::is_none")] + pub top_k: Option, + /// Stop sequences for the NL model (Google-specific) + #[serde(rename = "stop_sequences", skip_serializing_if = "Option::is_none")] + pub stop_sequences: Option>, + /// API version for the NL model service + #[serde(rename = "api_version", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + /// Project ID for GCP Vertex AI + #[serde(rename = "project_id", skip_serializing_if = "Option::is_none")] + pub project_id: Option, + /// Access token for GCP Vertex AI + #[serde(rename = "access_token", skip_serializing_if = "Option::is_none")] + pub access_token: Option, + /// Refresh token for GCP Vertex AI + #[serde(rename = "refresh_token", skip_serializing_if = "Option::is_none")] + pub refresh_token: Option, + /// Client ID for GCP Vertex AI + #[serde(rename = "client_id", skip_serializing_if = "Option::is_none")] + pub client_id: Option, + /// Client secret for GCP Vertex AI + #[serde(rename = "client_secret", skip_serializing_if = "Option::is_none")] + pub client_secret: Option, + /// Region for GCP Vertex AI + #[serde(rename = "region", skip_serializing_if = 
"Option::is_none")] + pub region: Option, + /// Maximum output tokens for GCP Vertex AI + #[serde(rename = "max_output_tokens", skip_serializing_if = "Option::is_none")] + pub max_output_tokens: Option, + /// Account ID for Cloudflare-specific models + #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] + pub account_id: Option, + /// Optional ID for the NL search model + #[serde(rename = "id", skip_serializing_if = "Option::is_none")] + pub id: Option, +} + +impl NlSearchModelCreateSchema { + pub fn new() -> NlSearchModelCreateSchema { + NlSearchModelCreateSchema { + model_name: None, + api_key: None, + api_url: None, + max_bytes: None, + temperature: None, + system_prompt: None, + top_p: None, + top_k: None, + stop_sequences: None, + api_version: None, + project_id: None, + access_token: None, + refresh_token: None, + client_id: None, + client_secret: None, + region: None, + max_output_tokens: None, + account_id: None, + id: None, + } + } +} + diff --git a/typesense_codegen/src/models/nl_search_model_delete_schema.rs b/typesense_codegen/src/models/nl_search_model_delete_schema.rs new file mode 100644 index 0000000..6864ee1 --- /dev/null +++ b/typesense_codegen/src/models/nl_search_model_delete_schema.rs @@ -0,0 +1,28 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct NlSearchModelDeleteSchema { + /// ID of the deleted NL search model + #[serde(rename = "id")] + pub id: String, +} + +impl NlSearchModelDeleteSchema { + pub fn new(id: String) -> NlSearchModelDeleteSchema { + NlSearchModelDeleteSchema { + id, + } + } +} + diff --git a/typesense_codegen/src/models/nl_search_model_schema.rs b/typesense_codegen/src/models/nl_search_model_schema.rs new file mode 100644 index 0000000..22d656e --- /dev/null +++ b/typesense_codegen/src/models/nl_search_model_schema.rs @@ -0,0 +1,100 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct NlSearchModelSchema { + /// Name of the NL model to use + #[serde(rename = "model_name", skip_serializing_if = "Option::is_none")] + pub model_name: Option, + /// API key for the NL model service + #[serde(rename = "api_key", skip_serializing_if = "Option::is_none")] + pub api_key: Option, + /// Custom API URL for the NL model service + #[serde(rename = "api_url", skip_serializing_if = "Option::is_none")] + pub api_url: Option, + /// Maximum number of bytes to process + #[serde(rename = "max_bytes", skip_serializing_if = "Option::is_none")] + pub max_bytes: Option, + /// Temperature parameter for the NL model + #[serde(rename = "temperature", skip_serializing_if = "Option::is_none")] + pub temperature: Option, + /// System prompt for the NL model + #[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")] + pub system_prompt: Option, + /// Top-p parameter for the NL model (Google-specific) + #[serde(rename = "top_p", skip_serializing_if = "Option::is_none")] + pub top_p: Option, + /// Top-k parameter for the NL model (Google-specific) + #[serde(rename = "top_k", skip_serializing_if = "Option::is_none")] + pub top_k: Option, + /// Stop sequences for the NL model (Google-specific) + #[serde(rename = "stop_sequences", skip_serializing_if = "Option::is_none")] + pub stop_sequences: Option>, + /// API version for the NL model service + #[serde(rename = "api_version", skip_serializing_if = "Option::is_none")] + pub api_version: Option, + /// Project ID for GCP Vertex AI + #[serde(rename = "project_id", skip_serializing_if = "Option::is_none")] + pub project_id: Option, + /// Access token for GCP Vertex AI + #[serde(rename = "access_token", skip_serializing_if = "Option::is_none")] + pub access_token: Option, + /// Refresh token for GCP Vertex AI + #[serde(rename = "refresh_token", skip_serializing_if = "Option::is_none")] + pub refresh_token: Option, + /// Client ID for GCP Vertex AI + #[serde(rename = "client_id", skip_serializing_if = "Option::is_none")] + pub client_id: Option, + /// Client secret for GCP Vertex AI + #[serde(rename = "client_secret", skip_serializing_if = "Option::is_none")] + pub client_secret: Option, + /// Region for GCP Vertex AI + #[serde(rename = "region", skip_serializing_if = "Option::is_none")] + pub region: Option, + /// Maximum output tokens for GCP Vertex AI + #[serde(rename = "max_output_tokens", skip_serializing_if = "Option::is_none")] + pub max_output_tokens: Option, + /// Account ID for Cloudflare-specific models + #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")] + pub account_id: Option, + /// ID of the NL search model + #[serde(rename = "id")] + pub id: String, +} + +impl NlSearchModelSchema { + pub fn new(id: String) -> NlSearchModelSchema { + NlSearchModelSchema { + model_name: None, + api_key: None, + api_url: None, + max_bytes: None, + temperature: None, + system_prompt: None, + top_p: None, + top_k: None, + stop_sequences: None, + api_version: None, + project_id: None, + access_token: None, + refresh_token: None, + client_id: None, + client_secret: None, + region: None, + max_output_tokens: None, + account_id: None, + id, + } + } +} + diff --git a/typesense_codegen/src/models/schema_change_status.rs b/typesense_codegen/src/models/schema_change_status.rs index 
3914832..ea0b52a 100644 --- a/typesense_codegen/src/models/schema_change_status.rs +++ b/typesense_codegen/src/models/schema_change_status.rs @@ -13,23 +13,23 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SchemaChangeStatus { - /// Number of documents that have been altered - #[serde(rename = "altered_docs", skip_serializing_if = "Option::is_none")] - pub altered_docs: Option, /// Name of the collection being modified #[serde(rename = "collection", skip_serializing_if = "Option::is_none")] pub collection: Option, /// Number of documents that have been validated #[serde(rename = "validated_docs", skip_serializing_if = "Option::is_none")] pub validated_docs: Option, + /// Number of documents that have been altered + #[serde(rename = "altered_docs", skip_serializing_if = "Option::is_none")] + pub altered_docs: Option, } impl SchemaChangeStatus { pub fn new() -> SchemaChangeStatus { SchemaChangeStatus { - altered_docs: None, collection: None, validated_docs: None, + altered_docs: None, } } } diff --git a/typesense_codegen/src/models/scoped_key_parameters.rs b/typesense_codegen/src/models/scoped_key_parameters.rs index c777764..62a11af 100644 --- a/typesense_codegen/src/models/scoped_key_parameters.rs +++ b/typesense_codegen/src/models/scoped_key_parameters.rs @@ -13,17 +13,17 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ScopedKeyParameters { - #[serde(rename = "expires_at", skip_serializing_if = "Option::is_none")] - pub expires_at: Option, #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] pub filter_by: Option, + #[serde(rename = "expires_at", skip_serializing_if = "Option::is_none")] + pub expires_at: Option, } impl ScopedKeyParameters { pub fn new() -> ScopedKeyParameters { ScopedKeyParameters { - expires_at: None, filter_by: None, + expires_at: None, } } } diff --git a/typesense_codegen/src/models/search_highlight.rs b/typesense_codegen/src/models/search_highlight.rs index 829685a..2da96ab 100644 --- a/typesense_codegen/src/models/search_highlight.rs +++ b/typesense_codegen/src/models/search_highlight.rs @@ -15,11 +15,6 @@ use serde::{Deserialize, Serialize}; pub struct SearchHighlight { #[serde(rename = "field", skip_serializing_if = "Option::is_none")] pub field: Option, - /// The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field - #[serde(rename = "indices", skip_serializing_if = "Option::is_none")] - pub indices: Option>, - #[serde(rename = "matched_tokens", skip_serializing_if = "Option::is_none")] - pub matched_tokens: Option>, /// Present only for (non-array) string fields #[serde(rename = "snippet", skip_serializing_if = "Option::is_none")] pub snippet: Option, @@ -32,18 +27,23 @@ pub struct SearchHighlight { /// Full field value with highlighting, present only for (array) string[] fields #[serde(rename = "values", skip_serializing_if = "Option::is_none")] pub values: Option>, + /// The indices property will be present only for string[] fields and will contain the corresponding indices of the snippets in the search field + #[serde(rename = "indices", skip_serializing_if = "Option::is_none")] + pub indices: Option>, + #[serde(rename = "matched_tokens", skip_serializing_if = "Option::is_none")] + pub matched_tokens: Option>, } impl SearchHighlight { pub fn new() -> SearchHighlight { SearchHighlight { field: None, - indices: None, - 
matched_tokens: None, snippet: None, snippets: None, value: None, values: None, + indices: None, + matched_tokens: None, } } } diff --git a/typesense_codegen/src/models/search_override.rs b/typesense_codegen/src/models/search_override.rs index ba68c84..bd7250f 100644 --- a/typesense_codegen/src/models/search_override.rs +++ b/typesense_codegen/src/models/search_override.rs @@ -13,38 +13,38 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchOverride { - /// A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. - #[serde(rename = "effective_from_ts", skip_serializing_if = "Option::is_none")] - pub effective_from_ts: Option, - /// A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. - #[serde(rename = "effective_to_ts", skip_serializing_if = "Option::is_none")] - pub effective_to_ts: Option, + #[serde(rename = "rule")] + pub rule: Box, + /// List of document `id`s that should be included in the search results with their corresponding `position`s. + #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] + pub includes: Option>, /// List of document `id`s that should be excluded from the search results. #[serde(rename = "excludes", skip_serializing_if = "Option::is_none")] pub excludes: Option>, /// A filter by clause that is applied to any search query that matches the override rule. #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] pub filter_by: Option, - /// When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. - #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] - pub filter_curated_hits: Option, - /// List of document `id`s that should be included in the search results with their corresponding `position`s. - #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] - pub includes: Option>, - /// Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. - #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] - pub metadata: Option, /// Indicates whether search query tokens that exist in the override's rule should be removed from the search query. #[serde(rename = "remove_matched_tokens", skip_serializing_if = "Option::is_none")] pub remove_matched_tokens: Option, - /// Replaces the current search query with this value, when the search query matches the override rule. - #[serde(rename = "replace_query", skip_serializing_if = "Option::is_none")] - pub replace_query: Option, - #[serde(rename = "rule")] - pub rule: Box, + /// Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, /// A sort by clause that is applied to any search query that matches the override rule. 
#[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] pub sort_by: Option, + /// Replaces the current search query with this value, when the search query matches the override rule. + #[serde(rename = "replace_query", skip_serializing_if = "Option::is_none")] + pub replace_query: Option, + /// When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. + #[serde(rename = "effective_from_ts", skip_serializing_if = "Option::is_none")] + pub effective_from_ts: Option, + /// A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. + #[serde(rename = "effective_to_ts", skip_serializing_if = "Option::is_none")] + pub effective_to_ts: Option, /// When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. #[serde(rename = "stop_processing", skip_serializing_if = "Option::is_none")] pub stop_processing: Option, @@ -55,17 +55,17 @@ pub struct SearchOverride { impl SearchOverride { pub fn new(rule: models::SearchOverrideRule, id: String) -> SearchOverride { SearchOverride { - effective_from_ts: None, - effective_to_ts: None, + rule: Box::new(rule), + includes: None, excludes: None, filter_by: None, - filter_curated_hits: None, - includes: None, - metadata: None, remove_matched_tokens: None, - replace_query: None, - rule: Box::new(rule), + metadata: None, sort_by: None, + replace_query: None, + filter_curated_hits: None, + effective_from_ts: None, + effective_to_ts: None, stop_processing: None, id, } diff --git a/typesense_codegen/src/models/search_override_rule.rs b/typesense_codegen/src/models/search_override_rule.rs index a40125a..21d659a 100644 --- a/typesense_codegen/src/models/search_override_rule.rs +++ b/typesense_codegen/src/models/search_override_rule.rs @@ -13,27 +13,27 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchOverrideRule { - /// Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. - #[serde(rename = "match", skip_serializing_if = "Option::is_none")] - pub r#match: Option, - /// Indicates what search queries should be overridden - #[serde(rename = "query", skip_serializing_if = "Option::is_none")] - pub query: Option, /// List of tag values to associate with this override rule. 
#[serde(rename = "tags", skip_serializing_if = "Option::is_none")] pub tags: Option>, + /// Indicates what search queries should be overridden + #[serde(rename = "query", skip_serializing_if = "Option::is_none")] + pub query: Option, + /// Indicates whether the match on the query term should be `exact` or `contains`. If we want to match all queries that contained the word `apple`, we will use the `contains` match instead. + #[serde(rename = "match", skip_serializing_if = "Option::is_none")] + pub r#match: Option, + /// Indicates that the override should apply when the filter_by parameter in a search query exactly matches the string specified here (including backticks, spaces, brackets, etc). + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, } impl SearchOverrideRule { pub fn new() -> SearchOverrideRule { SearchOverrideRule { - filter_by: None, - r#match: None, - query: None, tags: None, + query: None, + r#match: None, + filter_by: None, } } } diff --git a/typesense_codegen/src/models/search_override_schema.rs b/typesense_codegen/src/models/search_override_schema.rs index 1b1f334..3f7a04f 100644 --- a/typesense_codegen/src/models/search_override_schema.rs +++ b/typesense_codegen/src/models/search_override_schema.rs @@ -13,38 +13,38 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchOverrideSchema { - /// A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. - #[serde(rename = "effective_from_ts", skip_serializing_if = "Option::is_none")] - pub effective_from_ts: Option, - /// A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. - #[serde(rename = "effective_to_ts", skip_serializing_if = "Option::is_none")] - pub effective_to_ts: Option, + #[serde(rename = "rule")] + pub rule: Box, + /// List of document `id`s that should be included in the search results with their corresponding `position`s. + #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] + pub includes: Option>, /// List of document `id`s that should be excluded from the search results. #[serde(rename = "excludes", skip_serializing_if = "Option::is_none")] pub excludes: Option>, /// A filter by clause that is applied to any search query that matches the override rule. #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] pub filter_by: Option, - /// When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. - #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] - pub filter_curated_hits: Option, - /// List of document `id`s that should be included in the search results with their corresponding `position`s. - #[serde(rename = "includes", skip_serializing_if = "Option::is_none")] - pub includes: Option>, - /// Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. - #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] - pub metadata: Option, /// Indicates whether search query tokens that exist in the override's rule should be removed from the search query. 
#[serde(rename = "remove_matched_tokens", skip_serializing_if = "Option::is_none")] pub remove_matched_tokens: Option, - /// Replaces the current search query with this value, when the search query matches the override rule. - #[serde(rename = "replace_query", skip_serializing_if = "Option::is_none")] - pub replace_query: Option, - #[serde(rename = "rule")] - pub rule: Box, + /// Return a custom JSON object in the Search API response, when this rule is triggered. This can can be used to display a pre-defined message (eg: a promotion banner) on the front-end when a particular rule is triggered. + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + pub metadata: Option, /// A sort by clause that is applied to any search query that matches the override rule. #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] pub sort_by: Option, + /// Replaces the current search query with this value, when the search query matches the override rule. + #[serde(rename = "replace_query", skip_serializing_if = "Option::is_none")] + pub replace_query: Option, + /// When set to true, the filter conditions of the query is applied to the curated records as well. Default: false. + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// A Unix timestamp that indicates the date/time from which the override will be active. You can use this to create override rules that start applying from a future point in time. + #[serde(rename = "effective_from_ts", skip_serializing_if = "Option::is_none")] + pub effective_from_ts: Option, + /// A Unix timestamp that indicates the date/time until which the override will be active. You can use this to create override rules that stop applying after a period of time. + #[serde(rename = "effective_to_ts", skip_serializing_if = "Option::is_none")] + pub effective_to_ts: Option, /// When set to true, override processing will stop at the first matching rule. When set to false override processing will continue and multiple override actions will be triggered in sequence. Overrides are processed in the lexical sort order of their id field. Default: true. #[serde(rename = "stop_processing", skip_serializing_if = "Option::is_none")] pub stop_processing: Option, @@ -53,17 +53,17 @@ pub struct SearchOverrideSchema { impl SearchOverrideSchema { pub fn new(rule: models::SearchOverrideRule) -> SearchOverrideSchema { SearchOverrideSchema { - effective_from_ts: None, - effective_to_ts: None, + rule: Box::new(rule), + includes: None, excludes: None, filter_by: None, - filter_curated_hits: None, - includes: None, - metadata: None, remove_matched_tokens: None, - replace_query: None, - rule: Box::new(rule), + metadata: None, sort_by: None, + replace_query: None, + filter_curated_hits: None, + effective_from_ts: None, + effective_to_ts: None, stop_processing: None, } } diff --git a/typesense_codegen/src/models/search_parameters.rs b/typesense_codegen/src/models/search_parameters.rs index c6bf1c8..839dfe9 100644 --- a/typesense_codegen/src/models/search_parameters.rs +++ b/typesense_codegen/src/models/search_parameters.rs @@ -13,278 +13,286 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchParameters { - /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. 
- #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] - pub cache_ttl: Option, - /// Enable conversational search. - #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] - pub conversation: Option, - /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. - #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] - pub conversation_id: Option, - /// The Id of Conversation Model to be used. - #[serde(rename = "conversation_model_id", skip_serializing_if = "Option::is_none")] - pub conversation_model_id: Option, - #[serde(rename = "drop_tokens_mode", skip_serializing_if = "Option::is_none")] - pub drop_tokens_mode: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 - #[serde(rename = "drop_tokens_threshold", skip_serializing_if = "Option::is_none")] - pub drop_tokens_threshold: Option, - /// Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true - #[serde(rename = "enable_highlight_v1", skip_serializing_if = "Option::is_none")] - pub enable_highlight_v1: Option, - /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false - #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] - pub enable_overrides: Option, - /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true - #[serde(rename = "enable_synonyms", skip_serializing_if = "Option::is_none")] - pub enable_synonyms: Option, - /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. - #[serde(rename = "enable_typos_for_alpha_numerical_tokens", skip_serializing_if = "Option::is_none")] - pub enable_typos_for_alpha_numerical_tokens: Option, - /// Make Typesense disable typos for numerical tokens. - #[serde(rename = "enable_typos_for_numerical_tokens", skip_serializing_if = "Option::is_none")] - pub enable_typos_for_numerical_tokens: Option, - /// List of fields from the document to exclude in the search result - #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] - pub exclude_fields: Option, - /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). - #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] - pub exhaustive_search: Option, - /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. - #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] - pub facet_by: Option, - /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". 
- #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] - pub facet_query: Option, - /// Comma separated string of nested facet fields whose parent object should be returned in facet response. - #[serde(rename = "facet_return_parent", skip_serializing_if = "Option::is_none")] - pub facet_return_parent: Option, - /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). - #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] - pub facet_strategy: Option, - /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. - #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] - pub filter_by: Option, - /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false - #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] - pub filter_curated_hits: Option, - /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. - #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] - pub group_by: Option, - /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 - #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] - pub group_limit: Option, - /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true - #[serde(rename = "group_missing_values", skip_serializing_if = "Option::is_none")] - pub group_missing_values: Option, - /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. - #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] - pub hidden_hits: Option, - /// The number of tokens that should surround the highlighted text on each side. Default: 4 - #[serde(rename = "highlight_affix_num_tokens", skip_serializing_if = "Option::is_none")] - pub highlight_affix_num_tokens: Option, - /// The end tag used for the highlighted snippets. Default: `` - #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] - pub highlight_end_tag: Option, - /// A list of custom fields that must be highlighted even if you don't query for them - #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] - pub highlight_fields: Option, - /// List of fields which should be highlighted fully without snippeting - #[serde(rename = "highlight_full_fields", skip_serializing_if = "Option::is_none")] - pub highlight_full_fields: Option, - /// The start tag used for the highlighted snippets. 
Default: `` - #[serde(rename = "highlight_start_tag", skip_serializing_if = "Option::is_none")] - pub highlight_start_tag: Option, - /// List of fields from the document to include in the search result - #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] - pub include_fields: Option, + /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + #[serde(rename = "q", skip_serializing_if = "Option::is_none")] + pub q: Option, + /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] + pub query_by: Option, + /// Whether to use natural language processing to parse the query. + #[serde(rename = "nl_query", skip_serializing_if = "Option::is_none")] + pub nl_query: Option, + /// The ID of the natural language model to use. + #[serde(rename = "nl_model_id", skip_serializing_if = "Option::is_none")] + pub nl_model_id: Option, + /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] + pub query_by_weights: Option, + /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] + pub text_match_type: Option, + /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] + pub prefix: Option, /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results #[serde(rename = "infix", skip_serializing_if = "Option::is_none")] pub infix: Option, - /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. - #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// Control the number of words that Typesense considers for typo and prefix searching. - #[serde(rename = "max_candidates", skip_serializing_if = "Option::is_none")] - pub max_candidates: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. 
#[serde(rename = "max_extra_prefix", skip_serializing_if = "Option::is_none")] pub max_extra_prefix: Option, /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. #[serde(rename = "max_extra_suffix", skip_serializing_if = "Option::is_none")] pub max_extra_suffix: Option, - /// Maximum number of facet values to be returned. - #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] - pub max_facet_values: Option, + /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, /// Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. #[serde(rename = "max_filter_by_candidates", skip_serializing_if = "Option::is_none")] pub max_filter_by_candidates: Option, - /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] - pub min_len_1typo: Option, - /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. - #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] - pub min_len_2typo: Option, + /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] + pub sort_by: Option, + /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. + #[serde(rename = "facet_by", skip_serializing_if = "Option::is_none")] + pub facet_by: Option, + /// Maximum number of facet values to be returned. + #[serde(rename = "max_facet_values", skip_serializing_if = "Option::is_none")] + pub max_facet_values: Option, + /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". + #[serde(rename = "facet_query", skip_serializing_if = "Option::is_none")] + pub facet_query: Option, /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 #[serde(rename = "num_typos", skip_serializing_if = "Option::is_none")] pub num_typos: Option, - /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. - #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] - pub offset: Option, - /// Comma separated list of tags to trigger the curations rules that match the tags. 
- #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] - pub override_tags: Option, /// Results from this specific page number would be fetched. #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, /// Number of results to fetch per page. Default: 10 #[serde(rename = "per_page", skip_serializing_if = "Option::is_none")] pub per_page: Option, + /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] + pub offset: Option, + /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + #[serde(rename = "group_by", skip_serializing_if = "Option::is_none")] + pub group_by: Option, + /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + #[serde(rename = "group_limit", skip_serializing_if = "Option::is_none")] + pub group_limit: Option, + /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + #[serde(rename = "group_missing_values", skip_serializing_if = "Option::is_none")] + pub group_missing_values: Option, + /// List of fields from the document to include in the search result + #[serde(rename = "include_fields", skip_serializing_if = "Option::is_none")] + pub include_fields: Option, + /// List of fields from the document to exclude in the search result + #[serde(rename = "exclude_fields", skip_serializing_if = "Option::is_none")] + pub exclude_fields: Option, + /// List of fields which should be highlighted fully without snippeting + #[serde(rename = "highlight_full_fields", skip_serializing_if = "Option::is_none")] + pub highlight_full_fields: Option, + /// The number of tokens that should surround the highlighted text on each side. Default: 4 + #[serde(rename = "highlight_affix_num_tokens", skip_serializing_if = "Option::is_none")] + pub highlight_affix_num_tokens: Option, + /// The start tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_start_tag", skip_serializing_if = "Option::is_none")] + pub highlight_start_tag: Option, + /// The end tag used for the highlighted snippets. Default: `` + #[serde(rename = "highlight_end_tag", skip_serializing_if = "Option::is_none")] + pub highlight_end_tag: Option, + /// Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true + #[serde(rename = "enable_highlight_v1", skip_serializing_if = "Option::is_none")] + pub enable_highlight_v1: Option, + /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. 
Default: 30 + #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] + pub snippet_threshold: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + #[serde(rename = "drop_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub drop_tokens_threshold: Option, + #[serde(rename = "drop_tokens_mode", skip_serializing_if = "Option::is_none")] + pub drop_tokens_mode: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + #[serde(rename = "typo_tokens_threshold", skip_serializing_if = "Option::is_none")] + pub typo_tokens_threshold: Option, + /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + #[serde(rename = "enable_typos_for_alpha_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_alpha_numerical_tokens: Option, + /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). Default: false + #[serde(rename = "filter_curated_hits", skip_serializing_if = "Option::is_none")] + pub filter_curated_hits: Option, + /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + #[serde(rename = "enable_synonyms", skip_serializing_if = "Option::is_none")] + pub enable_synonyms: Option, + /// Allow synonym resolution on word prefixes in the query. Default: false + #[serde(rename = "synonym_prefix", skip_serializing_if = "Option::is_none")] + pub synonym_prefix: Option, + /// Allow synonym resolution on typo-corrected words in the query. Default: 0 + #[serde(rename = "synonym_num_typos", skip_serializing_if = "Option::is_none")] + pub synonym_num_typos: Option, /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. #[serde(rename = "pinned_hits", skip_serializing_if = "Option::is_none")] pub pinned_hits: Option, + /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + #[serde(rename = "hidden_hits", skip_serializing_if = "Option::is_none")] + pub hidden_hits: Option, + /// Comma separated list of tags to trigger the curations rules that match the tags. 
+ #[serde(rename = "override_tags", skip_serializing_if = "Option::is_none")] + pub override_tags: Option, + /// A list of custom fields that must be highlighted even if you don't query for them + #[serde(rename = "highlight_fields", skip_serializing_if = "Option::is_none")] + pub highlight_fields: Option, + /// Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. + #[serde(rename = "split_join_tokens", skip_serializing_if = "Option::is_none")] + pub split_join_tokens: Option, /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same #[serde(rename = "pre_segmented_query", skip_serializing_if = "Option::is_none")] pub pre_segmented_query: Option, - /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. - #[serde(rename = "prefix", skip_serializing_if = "Option::is_none")] - pub prefix: Option, /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. #[serde(rename = "preset", skip_serializing_if = "Option::is_none")] pub preset: Option, + /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + #[serde(rename = "enable_overrides", skip_serializing_if = "Option::is_none")] + pub enable_overrides: Option, /// Set this parameter to true to ensure that an exact match is ranked above the others #[serde(rename = "prioritize_exact_match", skip_serializing_if = "Option::is_none")] pub prioritize_exact_match: Option, - /// Make Typesense prioritize documents where the query words appear in more number of fields. - #[serde(rename = "prioritize_num_matching_fields", skip_serializing_if = "Option::is_none")] - pub prioritize_num_matching_fields: Option, + /// Control the number of words that Typesense considers for typo and prefix searching. + #[serde(rename = "max_candidates", skip_serializing_if = "Option::is_none")] + pub max_candidates: Option, /// Make Typesense prioritize documents where the query words appear earlier in the text. #[serde(rename = "prioritize_token_position", skip_serializing_if = "Option::is_none")] pub prioritize_token_position: Option, - /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. - #[serde(rename = "q", skip_serializing_if = "Option::is_none")] - pub q: Option, - /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. - #[serde(rename = "query_by", skip_serializing_if = "Option::is_none")] - pub query_by: Option, - /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. - #[serde(rename = "query_by_weights", skip_serializing_if = "Option::is_none")] - pub query_by_weights: Option, - /// Number of times to retry fetching remote embeddings. 
- #[serde(rename = "remote_embedding_num_tries", skip_serializing_if = "Option::is_none")] - pub remote_embedding_num_tries: Option, - /// Timeout (in milliseconds) for fetching remote embeddings. - #[serde(rename = "remote_embedding_timeout_ms", skip_serializing_if = "Option::is_none")] - pub remote_embedding_timeout_ms: Option, + /// Make Typesense prioritize documents where the query words appear in more number of fields. + #[serde(rename = "prioritize_num_matching_fields", skip_serializing_if = "Option::is_none")] + pub prioritize_num_matching_fields: Option, + /// Make Typesense disable typos for numerical tokens. + #[serde(rename = "enable_typos_for_numerical_tokens", skip_serializing_if = "Option::is_none")] + pub enable_typos_for_numerical_tokens: Option, + /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + #[serde(rename = "exhaustive_search", skip_serializing_if = "Option::is_none")] + pub exhaustive_search: Option, /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. #[serde(rename = "search_cutoff_ms", skip_serializing_if = "Option::is_none")] pub search_cutoff_ms: Option, - /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 - #[serde(rename = "snippet_threshold", skip_serializing_if = "Option::is_none")] - pub snippet_threshold: Option, - /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` - #[serde(rename = "sort_by", skip_serializing_if = "Option::is_none")] - pub sort_by: Option, - /// Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. - #[serde(rename = "split_join_tokens", skip_serializing_if = "Option::is_none")] - pub split_join_tokens: Option, - /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. - #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] - pub stopwords: Option, - /// Allow synonym resolution on typo-corrected words in the query. Default: 0 - #[serde(rename = "synonym_num_typos", skip_serializing_if = "Option::is_none")] - pub synonym_num_typos: Option, - /// Allow synonym resolution on word prefixes in the query. Default: false - #[serde(rename = "synonym_prefix", skip_serializing_if = "Option::is_none")] - pub synonym_prefix: Option, - /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. 
- #[serde(rename = "text_match_type", skip_serializing_if = "Option::is_none")] - pub text_match_type: Option, - /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 - #[serde(rename = "typo_tokens_threshold", skip_serializing_if = "Option::is_none")] - pub typo_tokens_threshold: Option, /// Enable server side caching of search query results. By default, caching is disabled. #[serde(rename = "use_cache", skip_serializing_if = "Option::is_none")] pub use_cache: Option, + /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + #[serde(rename = "cache_ttl", skip_serializing_if = "Option::is_none")] + pub cache_ttl: Option, + /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_1typo", skip_serializing_if = "Option::is_none")] + pub min_len_1typo: Option, + /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + #[serde(rename = "min_len_2typo", skip_serializing_if = "Option::is_none")] + pub min_len_2typo: Option, /// Vector query expression for fetching documents \"closest\" to a given query/document vector. #[serde(rename = "vector_query", skip_serializing_if = "Option::is_none")] pub vector_query: Option, + /// Timeout (in milliseconds) for fetching remote embeddings. + #[serde(rename = "remote_embedding_timeout_ms", skip_serializing_if = "Option::is_none")] + pub remote_embedding_timeout_ms: Option, + /// Number of times to retry fetching remote embeddings. + #[serde(rename = "remote_embedding_num_tries", skip_serializing_if = "Option::is_none")] + pub remote_embedding_num_tries: Option, + /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + #[serde(rename = "facet_strategy", skip_serializing_if = "Option::is_none")] + pub facet_strategy: Option, + /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + #[serde(rename = "stopwords", skip_serializing_if = "Option::is_none")] + pub stopwords: Option, + /// Comma separated string of nested facet fields whose parent object should be returned in facet response. + #[serde(rename = "facet_return_parent", skip_serializing_if = "Option::is_none")] + pub facet_return_parent: Option, /// The base64 encoded audio file in 16 khz 16-bit WAV format. #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] pub voice_query: Option, + /// Enable conversational search. + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option, + /// The Id of Conversation Model to be used. + #[serde(rename = "conversation_model_id", skip_serializing_if = "Option::is_none")] + pub conversation_model_id: Option, + /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. 
+ #[serde(rename = "conversation_id", skip_serializing_if = "Option::is_none")] + pub conversation_id: Option, } impl SearchParameters { pub fn new() -> SearchParameters { SearchParameters { - cache_ttl: None, - conversation: None, - conversation_id: None, - conversation_model_id: None, - drop_tokens_mode: None, - drop_tokens_threshold: None, - enable_highlight_v1: None, - enable_overrides: None, - enable_synonyms: None, - enable_typos_for_alpha_numerical_tokens: None, - enable_typos_for_numerical_tokens: None, - exclude_fields: None, - exhaustive_search: None, - facet_by: None, - facet_query: None, - facet_return_parent: None, - facet_strategy: None, - filter_by: None, - filter_curated_hits: None, - group_by: None, - group_limit: None, - group_missing_values: None, - hidden_hits: None, - highlight_affix_num_tokens: None, - highlight_end_tag: None, - highlight_fields: None, - highlight_full_fields: None, - highlight_start_tag: None, - include_fields: None, + q: None, + query_by: None, + nl_query: None, + nl_model_id: None, + query_by_weights: None, + text_match_type: None, + prefix: None, infix: None, - limit: None, - max_candidates: None, max_extra_prefix: None, max_extra_suffix: None, - max_facet_values: None, + filter_by: None, max_filter_by_candidates: None, - min_len_1typo: None, - min_len_2typo: None, + sort_by: None, + facet_by: None, + max_facet_values: None, + facet_query: None, num_typos: None, - offset: None, - override_tags: None, page: None, per_page: None, + limit: None, + offset: None, + group_by: None, + group_limit: None, + group_missing_values: None, + include_fields: None, + exclude_fields: None, + highlight_full_fields: None, + highlight_affix_num_tokens: None, + highlight_start_tag: None, + highlight_end_tag: None, + enable_highlight_v1: None, + snippet_threshold: None, + drop_tokens_threshold: None, + drop_tokens_mode: None, + typo_tokens_threshold: None, + enable_typos_for_alpha_numerical_tokens: None, + filter_curated_hits: None, + enable_synonyms: None, + synonym_prefix: None, + synonym_num_typos: None, pinned_hits: None, + hidden_hits: None, + override_tags: None, + highlight_fields: None, + split_join_tokens: None, pre_segmented_query: None, - prefix: None, preset: None, + enable_overrides: None, prioritize_exact_match: None, - prioritize_num_matching_fields: None, + max_candidates: None, prioritize_token_position: None, - q: None, - query_by: None, - query_by_weights: None, - remote_embedding_num_tries: None, - remote_embedding_timeout_ms: None, + prioritize_num_matching_fields: None, + enable_typos_for_numerical_tokens: None, + exhaustive_search: None, search_cutoff_ms: None, - snippet_threshold: None, - sort_by: None, - split_join_tokens: None, - stopwords: None, - synonym_num_typos: None, - synonym_prefix: None, - text_match_type: None, - typo_tokens_threshold: None, use_cache: None, + cache_ttl: None, + min_len_1typo: None, + min_len_2typo: None, vector_query: None, + remote_embedding_timeout_ms: None, + remote_embedding_num_tries: None, + facet_strategy: None, + stopwords: None, + facet_return_parent: None, voice_query: None, + conversation: None, + conversation_model_id: None, + conversation_id: None, } } } diff --git a/typesense_codegen/src/models/search_result.rs b/typesense_codegen/src/models/search_result.rs index fe158cd..ad5e13f 100644 --- a/typesense_codegen/src/models/search_result.rs +++ b/typesense_codegen/src/models/search_result.rs @@ -13,8 +13,6 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, 
Deserialize)] pub struct SearchResult { - #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] - pub conversation: Option>, #[serde(rename = "facet_counts", skip_serializing_if = "Option::is_none")] pub facet_counts: Option>, /// The number of documents found @@ -22,41 +20,43 @@ pub struct SearchResult { pub found: Option, #[serde(rename = "found_docs", skip_serializing_if = "Option::is_none")] pub found_docs: Option, - #[serde(rename = "grouped_hits", skip_serializing_if = "Option::is_none")] - pub grouped_hits: Option>, - /// The documents that matched the search query - #[serde(rename = "hits", skip_serializing_if = "Option::is_none")] - pub hits: Option>, + /// The number of milliseconds the search took + #[serde(rename = "search_time_ms", skip_serializing_if = "Option::is_none")] + pub search_time_ms: Option, /// The total number of documents in the collection #[serde(rename = "out_of", skip_serializing_if = "Option::is_none")] pub out_of: Option, + /// Whether the search was cut off + #[serde(rename = "search_cutoff", skip_serializing_if = "Option::is_none")] + pub search_cutoff: Option, /// The search result page number #[serde(rename = "page", skip_serializing_if = "Option::is_none")] pub page: Option, + #[serde(rename = "grouped_hits", skip_serializing_if = "Option::is_none")] + pub grouped_hits: Option>, + /// The documents that matched the search query + #[serde(rename = "hits", skip_serializing_if = "Option::is_none")] + pub hits: Option>, #[serde(rename = "request_params", skip_serializing_if = "Option::is_none")] pub request_params: Option>, - /// Whether the search was cut off - #[serde(rename = "search_cutoff", skip_serializing_if = "Option::is_none")] - pub search_cutoff: Option, - /// The number of milliseconds the search took - #[serde(rename = "search_time_ms", skip_serializing_if = "Option::is_none")] - pub search_time_ms: Option, + #[serde(rename = "conversation", skip_serializing_if = "Option::is_none")] + pub conversation: Option>, } impl SearchResult { pub fn new() -> SearchResult { SearchResult { - conversation: None, facet_counts: None, found: None, found_docs: None, - grouped_hits: None, - hits: None, + search_time_ms: None, out_of: None, + search_cutoff: None, page: None, + grouped_hits: None, + hits: None, request_params: None, - search_cutoff: None, - search_time_ms: None, + conversation: None, } } } diff --git a/typesense_codegen/src/models/search_result_hit.rs b/typesense_codegen/src/models/search_result_hit.rs index 3b25305..6ba8770 100644 --- a/typesense_codegen/src/models/search_result_hit.rs +++ b/typesense_codegen/src/models/search_result_hit.rs @@ -13,22 +13,22 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchResultHit { - /// Can be any key-value pair - #[serde(rename = "document", skip_serializing_if = "Option::is_none")] - pub document: Option, - /// Can be any key-value pair - #[serde(rename = "geo_distance_meters", skip_serializing_if = "Option::is_none")] - pub geo_distance_meters: Option>, - /// Highlighted version of the matching document - #[serde(rename = "highlight", skip_serializing_if = "Option::is_none")] - pub highlight: Option>, /// (Deprecated) Contains highlighted portions of the search fields #[serde(rename = "highlights", skip_serializing_if = "Option::is_none")] pub highlights: Option>, + /// Highlighted version of the matching document + #[serde(rename = "highlight", skip_serializing_if = "Option::is_none")] + pub highlight: Option>, 
+ /// Can be any key-value pair + #[serde(rename = "document", skip_serializing_if = "Option::is_none")] + pub document: Option, #[serde(rename = "text_match", skip_serializing_if = "Option::is_none")] pub text_match: Option, #[serde(rename = "text_match_info", skip_serializing_if = "Option::is_none")] pub text_match_info: Option>, + /// Can be any key-value pair + #[serde(rename = "geo_distance_meters", skip_serializing_if = "Option::is_none")] + pub geo_distance_meters: Option>, /// Distance between the query vector and matching document's vector value #[serde(rename = "vector_distance", skip_serializing_if = "Option::is_none")] pub vector_distance: Option, @@ -37,12 +37,12 @@ pub struct SearchResultHit { impl SearchResultHit { pub fn new() -> SearchResultHit { SearchResultHit { - document: None, - geo_distance_meters: None, - highlight: None, highlights: None, + highlight: None, + document: None, text_match: None, text_match_info: None, + geo_distance_meters: None, vector_distance: None, } } diff --git a/typesense_codegen/src/models/search_result_request_params.rs b/typesense_codegen/src/models/search_result_request_params.rs index eb44758..73f6294 100644 --- a/typesense_codegen/src/models/search_result_request_params.rs +++ b/typesense_codegen/src/models/search_result_request_params.rs @@ -15,20 +15,20 @@ use serde::{Deserialize, Serialize}; pub struct SearchResultRequestParams { #[serde(rename = "collection_name")] pub collection_name: String, - #[serde(rename = "per_page")] - pub per_page: i32, #[serde(rename = "q")] pub q: String, + #[serde(rename = "per_page")] + pub per_page: i32, #[serde(rename = "voice_query", skip_serializing_if = "Option::is_none")] pub voice_query: Option>, } impl SearchResultRequestParams { - pub fn new(collection_name: String, per_page: i32, q: String) -> SearchResultRequestParams { + pub fn new(collection_name: String, q: String, per_page: i32) -> SearchResultRequestParams { SearchResultRequestParams { collection_name, - per_page, q, + per_page, voice_query: None, } } diff --git a/typesense_codegen/src/models/search_synonym.rs b/typesense_codegen/src/models/search_synonym.rs index a252b0b..18d53fd 100644 --- a/typesense_codegen/src/models/search_synonym.rs +++ b/typesense_codegen/src/models/search_synonym.rs @@ -13,18 +13,18 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchSynonym { - /// Locale for the synonym, leave blank to use the standard tokenizer. - #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] - pub locale: Option, /// For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. #[serde(rename = "root", skip_serializing_if = "Option::is_none")] pub root: Option, - /// By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. - #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] - pub symbols_to_index: Option>, /// Array of words that should be considered as synonyms. #[serde(rename = "synonyms")] pub synonyms: Vec, + /// Locale for the synonym, leave blank to use the standard tokenizer. + #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] + pub locale: Option, + /// By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. 
+ #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] + pub symbols_to_index: Option>, #[serde(rename = "id")] pub id: String, } @@ -32,10 +32,10 @@ pub struct SearchSynonym { impl SearchSynonym { pub fn new(synonyms: Vec, id: String) -> SearchSynonym { SearchSynonym { - locale: None, root: None, - symbols_to_index: None, synonyms, + locale: None, + symbols_to_index: None, id, } } diff --git a/typesense_codegen/src/models/search_synonym_schema.rs b/typesense_codegen/src/models/search_synonym_schema.rs index a6d9923..548d405 100644 --- a/typesense_codegen/src/models/search_synonym_schema.rs +++ b/typesense_codegen/src/models/search_synonym_schema.rs @@ -13,27 +13,27 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct SearchSynonymSchema { - /// Locale for the synonym, leave blank to use the standard tokenizer. - #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] - pub locale: Option, /// For 1-way synonyms, indicates the root word that words in the `synonyms` parameter map to. #[serde(rename = "root", skip_serializing_if = "Option::is_none")] pub root: Option, - /// By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. - #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] - pub symbols_to_index: Option>, /// Array of words that should be considered as synonyms. #[serde(rename = "synonyms")] pub synonyms: Vec, + /// Locale for the synonym, leave blank to use the standard tokenizer. + #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] + pub locale: Option, + /// By default, special characters are dropped from synonyms. Use this attribute to specify which special characters should be indexed as is. 
+ #[serde(rename = "symbols_to_index", skip_serializing_if = "Option::is_none")] + pub symbols_to_index: Option>, } impl SearchSynonymSchema { pub fn new(synonyms: Vec) -> SearchSynonymSchema { SearchSynonymSchema { - locale: None, root: None, - symbols_to_index: None, synonyms, + locale: None, + symbols_to_index: None, } } } diff --git a/typesense_codegen/src/models/stemming_dictionary_words_inner.rs b/typesense_codegen/src/models/stemming_dictionary_words_inner.rs index ef6c067..3afafab 100644 --- a/typesense_codegen/src/models/stemming_dictionary_words_inner.rs +++ b/typesense_codegen/src/models/stemming_dictionary_words_inner.rs @@ -13,19 +13,19 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct StemmingDictionaryWordsInner { - /// The root form of the word - #[serde(rename = "root")] - pub root: String, /// The word form to be stemmed #[serde(rename = "word")] pub word: String, + /// The root form of the word + #[serde(rename = "root")] + pub root: String, } impl StemmingDictionaryWordsInner { - pub fn new(root: String, word: String) -> StemmingDictionaryWordsInner { + pub fn new(word: String, root: String) -> StemmingDictionaryWordsInner { StemmingDictionaryWordsInner { - root, word, + root, } } } diff --git a/typesense_codegen/src/models/stopwords_set_schema.rs b/typesense_codegen/src/models/stopwords_set_schema.rs index 1a9e9c9..a515ce4 100644 --- a/typesense_codegen/src/models/stopwords_set_schema.rs +++ b/typesense_codegen/src/models/stopwords_set_schema.rs @@ -15,18 +15,18 @@ use serde::{Deserialize, Serialize}; pub struct StopwordsSetSchema { #[serde(rename = "id")] pub id: String, - #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] - pub locale: Option, #[serde(rename = "stopwords")] pub stopwords: Vec, + #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] + pub locale: Option, } impl StopwordsSetSchema { pub fn new(id: String, stopwords: Vec) -> StopwordsSetSchema { StopwordsSetSchema { id, - locale: None, stopwords, + locale: None, } } } diff --git a/typesense_codegen/src/models/stopwords_set_upsert_schema.rs b/typesense_codegen/src/models/stopwords_set_upsert_schema.rs index 9900fb8..9a9add2 100644 --- a/typesense_codegen/src/models/stopwords_set_upsert_schema.rs +++ b/typesense_codegen/src/models/stopwords_set_upsert_schema.rs @@ -13,17 +13,17 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct StopwordsSetUpsertSchema { - #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] - pub locale: Option, #[serde(rename = "stopwords")] pub stopwords: Vec, + #[serde(rename = "locale", skip_serializing_if = "Option::is_none")] + pub locale: Option, } impl StopwordsSetUpsertSchema { pub fn new(stopwords: Vec) -> StopwordsSetUpsertSchema { StopwordsSetUpsertSchema { - locale: None, stopwords, + locale: None, } } } diff --git a/typesense_codegen/src/models/update_documents_parameters.rs b/typesense_codegen/src/models/update_documents_parameters.rs new file mode 100644 index 0000000..a4ff053 --- /dev/null +++ b/typesense_codegen/src/models/update_documents_parameters.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. 
+ * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct UpdateDocumentsParameters { + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, +} + +impl UpdateDocumentsParameters { + pub fn new() -> UpdateDocumentsParameters { + UpdateDocumentsParameters { + filter_by: None, + } + } +} + diff --git a/typesense_codegen/src/models/update_documents_update_documents_parameters_parameter.rs b/typesense_codegen/src/models/update_documents_update_documents_parameters_parameter.rs new file mode 100644 index 0000000..6eb1b10 --- /dev/null +++ b/typesense_codegen/src/models/update_documents_update_documents_parameters_parameter.rs @@ -0,0 +1,27 @@ +/* + * Typesense API + * + * An open source search engine for building delightful search experiences. + * + * The version of the OpenAPI document: 28.0 + * + * Generated by: https://openapi-generator.tech + */ + +use crate::models; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct UpdateDocumentsUpdateDocumentsParametersParameter { + #[serde(rename = "filter_by", skip_serializing_if = "Option::is_none")] + pub filter_by: Option, +} + +impl UpdateDocumentsUpdateDocumentsParametersParameter { + pub fn new() -> UpdateDocumentsUpdateDocumentsParametersParameter { + UpdateDocumentsUpdateDocumentsParametersParameter { + filter_by: None, + } + } +} + diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index e5e1a5d..a394748 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -1,9 +1,11 @@ [package] name = "xtask" version = "0.1.0" -edition = "2018" +edition = "2021" [dependencies] reqwest = { version = "0.11", features = ["blocking"] } # "blocking" is simpler for scripts anyhow = "1.0" clap = { version = "4.0", features = ["derive"] } +serde = { version = "1.0", features = ["derive"] } +serde_yaml = "0.9" diff --git a/xtask/src/main.rs b/xtask/src/main.rs index 7da5009..12be673 100644 --- a/xtask/src/main.rs +++ b/xtask/src/main.rs @@ -3,12 +3,15 @@ use clap::{Parser, ValueEnum}; use std::env; use std::fs; use std::process::Command; +mod preprocess_openapi; +use preprocess_openapi::preprocess_openapi_file; const SPEC_URL: &str = - "https://raw.githubusercontent.com/typesense/typesense-go/master/typesense/api/generator/generator.yml"; -// Input spec file, expected in the project root. + "https://raw.githubusercontent.com/typesense/typesense-api-spec/master/openapi.yml"; -const INPUT_SPEC_FILE: &str = "typesense-go-unwrapped-api-spec.yaml"; +// Input spec file, expected in the project root. +const INPUT_SPEC_FILE: &str = "openapi.yml"; +const OUTPUT_PREPROCESSED_FILE: &str = "./preprocessed_openapi.yml"; // Output directory for the generated code. const OUTPUT_DIR: &str = "typesense_codegen"; @@ -36,7 +39,6 @@ enum Task { CodeGen, } -// 3. The main function now parses the CLI arguments and loops through the requested tasks fn main() -> Result<()> { let cli = Cli::parse(); @@ -53,7 +55,6 @@ fn main() -> Result<()> { fn task_fetch_api_spec() -> Result<()> { println!("▶️ Running codegen task..."); - // 1. 
Fetch the OpenAPI spec println!(" - Downloading spec from {}", SPEC_URL); let response = reqwest::blocking::get(SPEC_URL).context("Failed to download OpenAPI spec file")?; @@ -76,6 +77,9 @@ fn task_fetch_api_spec() -> Result<()> { fn task_codegen() -> Result<()> { println!("▶️ Running codegen task via Docker..."); + println!("Preprocessing the Open API spec file..."); + preprocess_openapi_file(INPUT_SPEC_FILE, OUTPUT_PREPROCESSED_FILE) + .expect("Preprocess failed, aborting!"); // 1. Get the absolute path to the project's root directory. // std::env::current_dir() gives us the directory from which `cargo xtask` was run. let project_root = env::current_dir().context("Failed to get current directory")?; @@ -105,12 +109,20 @@ fn task_codegen() -> Result<()> { .arg("openapitools/openapi-generator-cli") .arg("generate") .arg("-i") - .arg(format!("/local/{}", INPUT_SPEC_FILE)) // Input path inside the container + .arg(format!("/local/{}", OUTPUT_PREPROCESSED_FILE)) // Input path inside the container .arg("-g") - .arg("rust") // The language generator + .arg("rust") .arg("-o") .arg(format!("/local/{}", OUTPUT_DIR)) // Output path inside the container - .status() // Execute the command and wait for it to finish + .arg("--additional-properties") + .arg("library=reqwest") + .arg("--additional-properties") + .arg("supportMiddleware=true") + .arg("--additional-properties") + .arg("useSingleRequestParameter=true") + // .arg("--additional-properties") + // .arg("useBonBuilder=true") + .status() .context("Failed to execute Docker command. Is Docker installed and running?")?; // 5. Check if the command was successful. diff --git a/xtask/src/preprocess_openapi.rs b/xtask/src/preprocess_openapi.rs new file mode 100644 index 0000000..e7524a7 --- /dev/null +++ b/xtask/src/preprocess_openapi.rs @@ -0,0 +1,256 @@ +use serde_yaml::{Mapping, Value}; +use std::fs; + +// --- Main function to orchestrate the file reading, processing, and writing --- +pub fn preprocess_openapi_file( + input_path: &str, + output_path: &str, +) -> Result<(), Box> { + // --- Step 1: Read the OpenAPI spec from the input file --- + println!("Reading OpenAPI spec from {}...", input_path); + let input_content = fs::read_to_string(input_path) + .map_err(|e| format!("Failed to read {}: {}", input_path, e))?; + let mut doc: Value = serde_yaml::from_str(&input_content)?; + + // Ensure the root is a mutable mapping + let doc_root = doc + .as_mapping_mut() + .ok_or("OpenAPI spec root is not a YAML map")?; + + // --- Step 2: Apply all the required transformations --- + println!("Preprocessing the spec..."); + unwrap_search_parameters(doc_root)?; + unwrap_multi_search_parameters(doc_root)?; + unwrap_parameters_by_path( + doc_root, + "/collections/{collectionName}/documents/import", + "post", + "importDocumentsParameters", + Some("ImportDocumentsParameters"), // Copy schema to components + )?; + unwrap_parameters_by_path( + doc_root, + "/collections/{collectionName}/documents/export", + "get", + "exportDocumentsParameters", + Some("ExportDocumentsParameters"), // Copy schema to components + )?; + unwrap_parameters_by_path( + doc_root, + "/collections/{collectionName}/documents", + "patch", + "updateDocumentsParameters", + Some("UpdateDocumentsParameters"), // Copy schema to components + )?; + unwrap_parameters_by_path( + doc_root, + "/collections/{collectionName}/documents", + "delete", + "deleteDocumentsParameters", + Some("DeleteDocumentsParameters"), // Copy schema to components + )?; + remove_additional_properties_from_search_hit(doc_root)?; + 
println!("Preprocessing complete."); + + // --- Step 3: Serialize the modified spec and write to the output file --- + println!("Writing processed spec to {}...", output_path); + let output_yaml = serde_yaml::to_string(&doc)?; + fs::write(output_path, output_yaml) + .map_err(|e| format!("Failed to write {}: {}", output_path, e))?; + + println!("Successfully created {}.", output_path); + Ok(()) +} + +/// A generic function to: +/// 1. (Optional) Copy an inline parameter schema to `components/schemas`. +/// 2. Unwrap that parameter object into individual query parameters within the `paths` definition. +fn unwrap_parameters_by_path( + doc: &mut Mapping, + path: &str, + method: &str, + param_name_to_unwrap: &str, + new_component_name: Option<&str>, +) -> Result<(), String> { + // --- Step 1 (Optional): Copy the inline schema to components --- + if let Some(component_name) = new_component_name { + println!( + "- Copying inline schema for '{}' to components.schemas.{}...", + param_name_to_unwrap, component_name + ); + + // Find the parameter with the inline schema to copy using a read-only borrow + let params_for_copy = doc + .get("paths") + .and_then(|p| p.get(path)) + .and_then(|p| p.get(method)) + .and_then(|op| op.get("parameters")) + .and_then(|params| params.as_sequence()) + .ok_or_else(|| format!("Could not find parameters for {} {}", method, path))?; + + let param_to_copy = params_for_copy + .iter() + .find(|p| p.get("name").and_then(|n| n.as_str()) == Some(param_name_to_unwrap)) + .ok_or_else(|| format!("Parameter '{}' not found for copying", param_name_to_unwrap))?; + + let inline_schema = param_to_copy + .get("schema") + .cloned() // Clone the schema to avoid borrowing issues + .ok_or_else(|| format!("No schema found for '{}'", param_name_to_unwrap))?; + + // Get a mutable borrow to insert the cloned schema into components + let schemas = doc + .get_mut("components") + .and_then(|c| c.get_mut("schemas")) + .and_then(|s| s.as_mapping_mut()) + .ok_or_else(|| "Could not find components/schemas section".to_string())?; + + schemas.insert(component_name.into(), inline_schema); + } + + // --- Step 2: Unwrap the parameter object into individual parameters --- + println!( + "- Unwrapping parameter object '{}'...", + param_name_to_unwrap + ); + + // Navigate down to the operation's parameters list (mutable) + let params_for_unwrap = doc + .get_mut("paths") + .and_then(|p| p.get_mut(path)) + .and_then(|p| p.get_mut(method)) + .and_then(|op| op.get_mut("parameters")) + .and_then(|params| params.as_sequence_mut()) + .ok_or_else(|| format!("Could not find parameters for {} {}", method, path))?; + + let param_index = params_for_unwrap + .iter() + .position(|p| p.get("name").and_then(|n| n.as_str()) == Some(param_name_to_unwrap)) + .ok_or_else(|| format!("Parameter '{}' not found in {}", param_name_to_unwrap, path))?; + + let param_object = params_for_unwrap.remove(param_index); + let properties = param_object + .get("schema") + .and_then(|s| s.get("properties")) + .and_then(|p| p.as_mapping()) + .ok_or_else(|| { + format!( + "Could not extract properties from '{}'", + param_name_to_unwrap + ) + })?; + + for (key, value) in properties { + let mut new_param = Mapping::new(); + new_param.insert("name".into(), key.clone()); + new_param.insert("in".into(), "query".into()); + new_param.insert("schema".into(), value.clone()); + params_for_unwrap.push(new_param.into()); + } + + Ok(()) +} + +/// Special handler for unwrapping search parameters from `components/schemas`. 
+fn unwrap_search_parameters(doc: &mut Mapping) -> Result<(), String> { + println!("- Unwrapping searchParameters..."); + // Get the definition of SearchParameters from components + let search_params_props = doc + .get("components") + .and_then(|c| c.get("schemas")) + .and_then(|s| s.get("SearchParameters")) + .and_then(|sp| sp.get("properties")) + .and_then(|p| p.as_mapping()) + .cloned() // Clone to avoid borrowing issues + .ok_or_else(|| "Could not find schema for SearchParameters".to_string())?; + + // Navigate to the operation's parameters list + let params = doc + .get_mut("paths") + .and_then(|p| p.get_mut("/collections/{collectionName}/documents/search")) + .and_then(|p| p.get_mut("get")) + .and_then(|op| op.get_mut("parameters")) + .and_then(|params| params.as_sequence_mut()) + .ok_or_else(|| { + "Could not find parameters for /collections/{collectionName}/documents/search" + .to_string() + })?; + + // Find and remove the old parameter object. + let param_index = params + .iter() + .position(|p| p.get("name").and_then(|n| n.as_str()) == Some("searchParameters")) + .ok_or_else(|| "searchParameters object not found".to_string())?; + params.remove(param_index); + + // Add the new individual parameters. + for (key, value) in search_params_props { + let mut new_param = Mapping::new(); + new_param.insert("name".into(), key.clone()); + new_param.insert("in".into(), "query".into()); + new_param.insert("schema".into(), value.clone()); + params.push(new_param.into()); + } + + Ok(()) +} + +/// Special handler for unwrapping multi-search parameters from `components/schemas`. +fn unwrap_multi_search_parameters(doc: &mut Mapping) -> Result<(), String> { + println!("- Unwrapping multiSearchParameters..."); + // Get the definition of MultiSearchParameters from components + let search_params_props: Mapping = doc + .get("components") + .and_then(|c| c.get("schemas")) + .and_then(|s| s.get("MultiSearchParameters")) + .and_then(|sp| sp.get("properties")) + .and_then(|p| p.as_mapping()) + .cloned() + .ok_or_else(|| "Could not find schema for MultiSearchParameters".to_string())?; + + // Navigate to the operation's parameters list + let params = doc + .get_mut("paths") + .and_then(|p| p.get_mut("/multi_search")) + .and_then(|p| p.get_mut("post")) + .and_then(|op| op.get_mut("parameters")) + .and_then(|params| params.as_sequence_mut()) + .ok_or_else(|| "Could not find parameters for /multi_search".to_string())?; + + // Find and remove the old parameter object. + let param_index = params + .iter() + .position(|p| p.get("name").and_then(|n| n.as_str()) == Some("multiSearchParameters")) + .ok_or_else(|| "multiSearchParameters object not found".to_string())?; + params.remove(param_index); + + // Add the new individual parameters. + for (key, value) in search_params_props { + let mut new_param = Mapping::new(); + new_param.insert("name".into(), key.clone()); + new_param.insert("in".into(), "query".into()); + new_param.insert("schema".into(), value.clone()); + params.push(new_param.into()); + } + + Ok(()) +} + +/// Modifies the SearchResultHit schema to remove `additionalProperties` from the `document` field. 
+fn remove_additional_properties_from_search_hit(doc: &mut Mapping) -> Result<(), String> { + let document_prop = doc + .get_mut("components") + .and_then(|c| c.get_mut("schemas")) + .and_then(|s| s.get_mut("SearchResultHit")) + .and_then(|srh| srh.get_mut("properties")) + .and_then(|props| props.get_mut("document")) + .and_then(|doc_val| doc_val.as_mapping_mut()) + .ok_or_else(|| "Could not find document property in SearchResultHit schema".to_string())?; + + // Remove the 'additionalProperties' key + if document_prop.remove("additionalProperties").is_some() { + println!("- Removed additionalProperties from SearchResultHit.document"); + } + + Ok(()) +} From 57a1f6e929a9b9fbda55a899c8ac40530f4f06ea Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Thu, 17 Jul 2025 21:42:15 +0700 Subject: [PATCH 03/21] feat: documents `.create()` and `.upsert()` --- typesense/src/client/collection/documents.rs | 70 ++++++++------------ 1 file changed, 26 insertions(+), 44 deletions(-) diff --git a/typesense/src/client/collection/documents.rs b/typesense/src/client/collection/documents.rs index 6226368..bbaed59 100644 --- a/typesense/src/client/collection/documents.rs +++ b/typesense/src/client/collection/documents.rs @@ -31,13 +31,12 @@ impl<'a> Documents<'a> { /// Indexes a document in the collection. /// - /// If the document has an 'id' field, it will be used as the document's ID. - /// Otherwise, Typesense will auto-generate an ID. + /// /// # Arguments /// * `document` - A `serde_json::Value` representing the document. - /// * `action` - The indexing action to perform (e.g., "create", "upsert", "update"). - pub async fn index( + /// * `action` - The indexing action to perform (e.g., "create", "upsert"). + async fn index( &self, document: serde_json::Value, action: &str, @@ -56,67 +55,50 @@ impl<'a> Documents<'a> { .await } - /// Fetches an individual document from the collection by its ID. + /// Creates a new document in the collection. + /// Fails if a document with the same id already exists + /// + /// If the document has an `id` field of type `string`, it will be used as the document's ID. + /// Otherwise, Typesense will auto-generate an ID. /// /// # Arguments - /// * `document_id` - The ID of the document to retrieve. - pub async fn retrieve( + /// * `document` - A `serde_json::Value` representing the document to create. + pub async fn create( &self, - document_id: &str, - ) -> Result> { - let params = documents_api::GetDocumentParams { - collection_name: self.collection_name.to_string(), - document_id: document_id.to_string(), - }; - - self.client - .execute(|config: Arc| { - let params_for_move = params.clone(); - async move { documents_api::get_document(&config, params_for_move).await } - }) - .await + document: serde_json::Value, + ) -> Result> { + self.index(document, "create").await } - /// Updates an individual document from the collection by its ID. The update can be partial. + /// Creates a new document or updates an existing document if a document with the same id already exists. + /// Requires the whole document to be sent. For partial updates, use the `update()` action. /// /// # Arguments - /// * `document_id` - The ID of the document to update. - /// * `document` - A `serde_json::Value` containing the fields to update. - pub async fn update( + /// * `document` - A `serde_json::Value` representing the document to upsert. 
+ pub async fn upsert( &self, - document_id: &str, document: serde_json::Value, - ) -> Result> { - let params = documents_api::UpdateDocumentParams { - collection_name: self.collection_name.to_string(), - document_id: document_id.to_string(), - body: document, - dirty_values: None, - }; - self.client - .execute(|config: Arc| { - let params_for_move = params.clone(); - async move { documents_api::update_document(&config, params_for_move).await } - }) - .await + ) -> Result> { + self.index(document, "upsert").await } - /// Deletes an individual document from the collection by its ID. + /// Fetches an individual document from the collection by its ID. /// /// # Arguments - /// * `document_id` - The ID of the document to delete. - pub async fn delete( + /// * `document_id` - The ID of the document to retrieve. + pub async fn retrieve( &self, document_id: &str, - ) -> Result> { - let params = documents_api::DeleteDocumentParams { + ) -> Result> { + let params = documents_api::GetDocumentParams { collection_name: self.collection_name.to_string(), document_id: document_id.to_string(), }; + self.client .execute(|config: Arc| { let params_for_move = params.clone(); - async move { documents_api::delete_document(&config, params_for_move).await } + async move { documents_api::get_document(&config, params_for_move).await } }) .await } From 46a220883939c55108d6864a5b692b45c0c424f8 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Fri, 18 Jul 2025 21:21:49 +0700 Subject: [PATCH 04/21] feat: alias --- typesense/src/client/alias.rs | 56 ++++++++++++++++++++ typesense/src/client/aliases.rs | 61 ++++++++++++++++++++++ typesense/src/client/collections.rs | 79 ----------------------------- typesense/src/client/mod.rs | 13 +++++ 4 files changed, 130 insertions(+), 79 deletions(-) create mode 100644 typesense/src/client/alias.rs create mode 100644 typesense/src/client/aliases.rs diff --git a/typesense/src/client/alias.rs b/typesense/src/client/alias.rs new file mode 100644 index 0000000..c49bf86 --- /dev/null +++ b/typesense/src/client/alias.rs @@ -0,0 +1,56 @@ +//! Provides access to the collection alias-related API endpoints. +//! +//! An `Alias` instance is created via the main `client.alias()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{collections_api, configuration}, + models, +}; + +/// Provides methods for interacting with a specific Typesense collection alias. +/// +/// This struct is created by calling `client.alias()`. +pub struct Alias<'a> { + pub(super) client: &'a Client, + pub(super) name: &'a str, +} + +impl<'a> Alias<'a> { + /// Creates a new `Alias` instance. + pub(super) fn new(client: &'a Client, name: &'a str) -> Self { + Self { client, name } + } + + /// Retrieves the details of a collection alias, including the collection it points to. + pub async fn retrieve( + &self, + ) -> Result> { + let params = collections_api::GetAliasParams { + alias_name: self.name.to_string(), + }; + + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::get_alias(&config, params_for_move).await } + }) + .await + } + + /// Deletes a collection alias. 
+ pub async fn delete( + &self, + ) -> Result> { + let params = collections_api::DeleteAliasParams { + alias_name: self.name.to_string(), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::delete_alias(&config, params_for_move).await } + }) + .await + } +} diff --git a/typesense/src/client/aliases.rs b/typesense/src/client/aliases.rs new file mode 100644 index 0000000..53594ce --- /dev/null +++ b/typesense/src/client/aliases.rs @@ -0,0 +1,61 @@ +//! Provides access to the collection aliases-related API endpoints. +//! +//! An `Aliases` instance is created via the main `client.aliases()` method. + +use super::{Client, Error}; +use std::sync::Arc; +use typesense_codegen::{ + apis::{collections_api, configuration}, + models, +}; + +/// Provides methods for interacting with Typesense collection aliases. +/// +/// This struct is created by calling `client.aliases()`. +pub struct Aliases<'a> { + pub(super) client: &'a Client, +} + +impl<'a> Aliases<'a> { + /// Creates a new `Aliases` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + + /// Creates or updates a collection alias. + /// + /// An alias is a virtual collection name that points to a real collection. + /// Aliases are useful when you want to re-index your data in the background + /// on a new collection and then switch your application to it without any + /// changes to your code. + /// + /// # Arguments + /// * `schema` - A `CollectionAliasSchema` pointing to the target collection. + pub async fn upsert( + &self, + alias_name: &str, + schema: models::CollectionAliasSchema, + ) -> Result> { + let params = collections_api::UpsertAliasParams { + alias_name: alias_name.to_string(), + collection_alias_schema: Some(schema), + }; + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { collections_api::upsert_alias(&config, params_for_move).await } + }) + .await + } + + /// Lists all aliases and the corresponding collections that they map to. + pub async fn retrieve( + &self, + ) -> Result> { + self.client + .execute(|config: Arc| async move { + collections_api::get_aliases(&config).await + }) + .await + } +} diff --git a/typesense/src/client/collections.rs b/typesense/src/client/collections.rs index 7c2846f..e798fb5 100644 --- a/typesense/src/client/collections.rs +++ b/typesense/src/client/collections.rs @@ -59,83 +59,4 @@ impl<'a> Collections<'a> { }) .await } - - // --- Alias-Specific Methods --- - - /// Creates or updates a collection alias. - /// - /// An alias is a virtual collection name that points to a real collection. - /// Aliases are useful when you want to re-index your data in the background - /// on a new collection and then switch your application to it without any - /// changes to your code. - /// - /// # Arguments - /// * `name` - The name of the alias to create or update. - /// * `schema` - A `CollectionAliasSchema` pointing to the target collection. - pub async fn upsert_alias( - &self, - name: &str, - schema: models::CollectionAliasSchema, - ) -> Result> { - let params = collections_api::UpsertAliasParams { - alias_name: name.to_string(), - collection_alias_schema: Some(schema), - }; - self.client - .execute(|config: Arc| { - let params_for_move = params.clone(); - async move { collections_api::upsert_alias(&config, params_for_move).await } - }) - .await - } - - /// Retrieves the details of a collection alias, including the collection it points to. 
- /// - /// # Arguments - /// * `name` - The name of the alias to retrieve. - pub async fn get_alias( - &self, - name: &str, - ) -> Result> { - let params = collections_api::GetAliasParams { - alias_name: name.to_string(), - }; - - self.client - .execute(|config: Arc| { - let params_for_move = params.clone(); - async move { collections_api::get_alias(&config, params_for_move).await } - }) - .await - } - - /// Lists all aliases and the corresponding collections that they map to. - pub async fn list_aliases( - &self, - ) -> Result> { - self.client - .execute(|config: Arc| async move { - collections_api::get_aliases(&config).await - }) - .await - } - - /// Deletes a collection alias. - /// - /// # Arguments - /// * `name` - The name of the alias to delete. - pub async fn delete_alias( - &self, - name: &str, - ) -> Result> { - let params = collections_api::DeleteAliasParams { - alias_name: name.to_string(), - }; - self.client - .execute(|config: Arc| { - let params_for_move = params.clone(); - async move { collections_api::delete_alias(&config, params_for_move).await } - }) - .await - } } diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index 55e782e..6c3c1e0 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -50,6 +50,8 @@ //! } //! ``` +pub mod alias; +pub mod aliases; pub mod analytics; pub mod collection; pub mod collections; @@ -64,6 +66,8 @@ pub mod stemming; pub mod stopword; pub mod stopwords; +pub use alias::Alias; +pub use aliases::Aliases; pub use analytics::Analytics; pub use collection::Collection; pub use collections::Collections; @@ -311,6 +315,15 @@ impl Client { )))) } + /// Provides access to the collection aliases-related API endpoints. + pub fn aliases(&self) -> Aliases<'_> { + Aliases::new(self) + } + + /// Provides access to a specific collection alias's-related API endpoints. + pub fn alias<'a>(&'a self, name: &'a str) -> Alias<'a> { + Alias::new(self, name) + } /// Provides access to API endpoints for managing collections like `create()` and `retrieve()`. 
pub fn collections(&self) -> collections::Collections<'_> { collections::Collections::new(self) From 0d96f6b60d3a9fe8d1006a52299896ddb590abbe Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Thu, 24 Jul 2025 22:11:59 +0700 Subject: [PATCH 05/21] integration tests --- .gitignore | 3 +- compose.yml | 9 + typesense/Cargo.toml | 1 + typesense/src/client/collection/document.rs | 6 +- typesense/src/client/collection/documents.rs | 80 ++-- .../src/client/collection/search_override.rs | 2 +- .../src/client/collection/search_overrides.rs | 2 +- typesense/src/client/collections.rs | 2 +- typesense/src/client/mod.rs | 431 +++++++++++++++--- typesense/src/client/multi_search.rs | 5 + typesense/src/client/stemming/dictionaries.rs | 2 +- typesense/src/client/stemming/dictionary.rs | 18 +- typesense/tests/api/collection.rs | 272 +++++------ typesense/tests/api/documents.rs | 222 ++++----- typesense/tests/api/lib.rs | 82 ++-- typesense/tests/client/aliases_test.rs | 92 ++++ typesense/tests/client/analytics_test.rs | 123 +++++ typesense/tests/client/client_test.rs | 326 +++++++++++++ typesense/tests/client/collections_test.rs | 133 ++++++ .../tests/client/conversation_models_test.rs | 353 ++++++++++++++ typesense/tests/client/documents_test.rs | 205 +++++++++ typesense/tests/client/keys_test.rs | 82 ++++ typesense/tests/client/mod.rs | 321 ++----------- typesense/tests/client/multi_search_test.rs | 248 ++++++++++ typesense/tests/client/presets_test.rs | 86 ++++ .../tests/client/search_overrides_test.rs | 120 +++++ .../client/stemming_dictionaries_test.rs | 73 +++ typesense/tests/client/stopwords_test.rs | 59 +++ typesense/tests/client/synonyms_test.rs | 112 +++++ typesense_codegen/src/apis/documents_api.rs | 394 +++++++++++----- typesense_codegen/src/apis/stemming_api.rs | 74 ++- 31 files changed, 3118 insertions(+), 820 deletions(-) create mode 100644 compose.yml create mode 100644 typesense/tests/client/aliases_test.rs create mode 100644 typesense/tests/client/analytics_test.rs create mode 100644 typesense/tests/client/client_test.rs create mode 100644 typesense/tests/client/collections_test.rs create mode 100644 typesense/tests/client/conversation_models_test.rs create mode 100644 typesense/tests/client/documents_test.rs create mode 100644 typesense/tests/client/keys_test.rs create mode 100644 typesense/tests/client/multi_search_test.rs create mode 100644 typesense/tests/client/presets_test.rs create mode 100644 typesense/tests/client/search_overrides_test.rs create mode 100644 typesense/tests/client/stemming_dictionaries_test.rs create mode 100644 typesense/tests/client/stopwords_test.rs create mode 100644 typesense/tests/client/synonyms_test.rs diff --git a/.gitignore b/.gitignore index 3549fae..8d7229e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /target Cargo.lock -.env \ No newline at end of file +.env +/typesense-data \ No newline at end of file diff --git a/compose.yml b/compose.yml new file mode 100644 index 0000000..c77de65 --- /dev/null +++ b/compose.yml @@ -0,0 +1,9 @@ +services: + typesense: + image: typesense/typesense:29.0 + restart: on-failure + ports: + - '8108:8108' + volumes: + - ./typesense-data:/data + command: '--data-dir /data --api-key=xyz --enable-cors' diff --git a/typesense/Cargo.toml b/typesense/Cargo.toml index d703ba9..ed271b3 100644 --- a/typesense/Cargo.toml +++ b/typesense/Cargo.toml @@ -35,6 +35,7 @@ dotenvy = "0.15" trybuild = "1.0.42" tokio = { version = "1", features = ["macros", "rt-multi-thread"] } wiremock = "0.5" +nanoid = "0.4" 
[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] tokio = { version = "1.5", features = ["macros", "rt", "rt-multi-thread"] } diff --git a/typesense/src/client/collection/document.rs b/typesense/src/client/collection/document.rs index 10c3005..8ff1cd5 100644 --- a/typesense/src/client/collection/document.rs +++ b/typesense/src/client/collection/document.rs @@ -10,7 +10,7 @@ use typesense_codegen::apis::{configuration, documents_api}; /// Provides methods for interacting with a single document within a specific Typesense collection. /// -/// This struct is created by calling a method like `collection.document("document_id")`. +/// This struct is created by calling a method like `client.collection("collection_name").document("document_id")`. pub struct Document<'a> { pub(super) client: &'a Client, pub(super) collection_name: &'a str, @@ -29,7 +29,9 @@ impl<'a> Document<'a> { } /// Fetches this individual document from the collection. - pub async fn get(&self) -> Result> { + pub async fn retrieve( + &self, + ) -> Result> { let params = documents_api::GetDocumentParams { collection_name: self.collection_name.to_string(), document_id: self.document_id.to_string(), diff --git a/typesense/src/client/collection/documents.rs b/typesense/src/client/collection/documents.rs index bbaed59..e4b7360 100644 --- a/typesense/src/client/collection/documents.rs +++ b/typesense/src/client/collection/documents.rs @@ -7,7 +7,10 @@ use super::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, - models, + models::{ + self, DeleteDocumentsParameters, ExportDocumentsParameters, ImportDocumentsParameters, + UpdateDocumentsParameters, + }, }; /// Provides methods for interacting with documents within a specific Typesense collection. @@ -82,27 +85,6 @@ impl<'a> Documents<'a> { self.index(document, "upsert").await } - /// Fetches an individual document from the collection by its ID. - /// - /// # Arguments - /// * `document_id` - The ID of the document to retrieve. - pub async fn retrieve( - &self, - document_id: &str, - ) -> Result> { - let params = documents_api::GetDocumentParams { - collection_name: self.collection_name.to_string(), - document_id: document_id.to_string(), - }; - - self.client - .execute(|config: Arc| { - let params_for_move = params.clone(); - async move { documents_api::get_document(&config, params_for_move).await } - }) - .await - } - // --- Bulk Operation Methods --- /// Imports a batch of documents in JSONL format. @@ -111,15 +93,23 @@ impl<'a> Documents<'a> { /// /// # Arguments /// * `documents_jsonl` - A string containing the documents in JSONL format. - /// * `params` - An `ImportDocumentsParams` struct containing options like `action` and `batch_size`. - /// The `collection_name` field will be overwritten. + /// * `params` - An `ImportDocumentsParameters` struct containing options like `action` and `batch_size`. 
pub async fn import( &self, documents_jsonl: String, - mut params: documents_api::ImportDocumentsParams, + params: ImportDocumentsParameters, ) -> Result> { - params.collection_name = self.collection_name.to_string(); - params.body = documents_jsonl; + let params = documents_api::ImportDocumentsParams { + body: documents_jsonl, + collection_name: self.collection_name.to_string(), + + action: params.action, + batch_size: params.batch_size, + dirty_values: params.dirty_values, + remote_embedding_batch_size: params.remote_embedding_batch_size, + return_doc: params.return_doc, + return_id: params.return_id, + }; self.client .execute(|config: Arc| { @@ -132,13 +122,17 @@ impl<'a> Documents<'a> { /// Exports all documents in a collection in JSONL format. /// /// # Arguments - /// * `params` - An `ExportDocumentsParams` struct containing options like `filter_by` and `include_fields`. - /// The `collection_name` field will be overwritten. + /// * `params` - An `ExportDocumentsParameters` struct containing options like `filter_by` and `include_fields`. pub async fn export( &self, - mut params: documents_api::ExportDocumentsParams, + params: ExportDocumentsParameters, ) -> Result> { - params.collection_name = self.collection_name.to_string(); + let params = documents_api::ExportDocumentsParams { + collection_name: self.collection_name.to_string(), + exclude_fields: params.exclude_fields, + filter_by: params.filter_by, + include_fields: params.include_fields, + }; self.client .execute(|config: Arc| { @@ -151,20 +145,18 @@ impl<'a> Documents<'a> { /// Deletes a batch of documents matching a specific filter condition. /// /// # Arguments - /// * `filter_by` - The filter condition for deleting documents. - /// * `batch_size` - The number of documents to delete at a time. - pub async fn delete_by_filter( + /// * `params` - A `DeleteDocumentsParameters` describing the conditions for deleting documents. + pub async fn delete( &self, - filter_by: &str, - batch_size: Option, + params: DeleteDocumentsParameters, ) -> Result> { let params = documents_api::DeleteDocumentsParams { collection_name: self.collection_name.to_string(), - filter_by: Some(filter_by.to_string()), - batch_size, - ignore_not_found: None, - truncate: None, + filter_by: Some(params.filter_by), + batch_size: params.batch_size, + ignore_not_found: params.ignore_not_found, + truncate: params.truncate, }; self.client .execute(|config: Arc| { @@ -177,17 +169,17 @@ impl<'a> Documents<'a> { /// Updates a batch of documents matching a specific filter condition. /// /// # Arguments - /// * `filter_by` - The filter condition for updating documents. /// * `document` - A `serde_json::Value` containing the fields to update. - pub async fn update_by_filter( + /// * `params` - A `UpdateDocumentsParameters` describing the conditions for updating documents. + pub async fn update( &self, - filter_by: &str, document: serde_json::Value, + params: UpdateDocumentsParameters, ) -> Result> { let params = documents_api::UpdateDocumentsParams { collection_name: self.collection_name.to_string(), - filter_by: Some(filter_by.to_string()), + filter_by: params.filter_by, body: document, }; self.client diff --git a/typesense/src/client/collection/search_override.rs b/typesense/src/client/collection/search_override.rs index d8ae007..9805a5d 100644 --- a/typesense/src/client/collection/search_override.rs +++ b/typesense/src/client/collection/search_override.rs @@ -11,7 +11,7 @@ use typesense_codegen::{ /// Provides methods for interacting with a specific search override. 
/// -/// This struct is created by calling `documents.search_override("override_id")`. +/// This struct is created by calling `client.collection("collection_name").search_override("override_id")`. pub struct SearchOverride<'a> { pub(super) client: &'a Client, pub(super) collection_name: &'a str, diff --git a/typesense/src/client/collection/search_overrides.rs b/typesense/src/client/collection/search_overrides.rs index 39b1102..11151a1 100644 --- a/typesense/src/client/collection/search_overrides.rs +++ b/typesense/src/client/collection/search_overrides.rs @@ -11,7 +11,7 @@ use typesense_codegen::{ /// Provides methods for interacting with a collection of search overrides. /// -/// This struct is created by calling `client.collection("collection_name").overrides()`. +/// This struct is created by calling `client.collection("collection_name").search_overrides()`. pub struct SearchOverrides<'a> { pub(super) client: &'a Client, pub(super) collection_name: &'a str, diff --git a/typesense/src/client/collections.rs b/typesense/src/client/collections.rs index e798fb5..9378810 100644 --- a/typesense/src/client/collections.rs +++ b/typesense/src/client/collections.rs @@ -50,7 +50,7 @@ impl<'a> Collections<'a> { /// /// The collections are returned sorted by creation date, with the most /// recent collections appearing first. - pub async fn list_all( + pub async fn retrieve( &self, ) -> Result, Error> { self.client diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index 6c3c1e0..1613178 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -14,7 +14,7 @@ //! ## Example Usage //! //! ```no_run -//! use typesense_client::client::{Client, MultiNodeConfiguration}; +//! use typesense::client::{Client, MultiNodeConfiguration}; //! use typesense_codegen::models; //! use reqwest::Url; //! use reqwest_retry::policies::ExponentialBackoff; //! @@ -34,16 +34,16 @@ //! let client = Client::new(config)?; //! //! // Retrieve details for a collection -//! let collection = client.collections().get("products").await?; +//! let collection = client.collection("products").retrieve().await?; //! println!("Collection Name: {}", collection.name); //! //! // Search for a document -//! let search_params = models::SearchCollectionParams { -//! q: "phone".to_string(), -//! query_by: "name".to_string(), +//! let search_params = models::SearchParameters { +//! q: Some("phone".to_string()), +//! query_by: Some("name".to_string()), //! ..Default::default() //! }; -//! let search_results = client.documents("products").search(search_params).await?; +//! let search_results = client.collection("products").documents().search(search_params).await?; //! println!("Found {} hits.", search_results.found.unwrap_or(0)); //! //! Ok(()) @@ -93,6 +93,8 @@ use std::time::{Duration, Instant}; use thiserror::Error; use typesense_codegen::apis::{self, configuration}; +use crate::client::multi_search::MultiSearch; + // --- Internal Node Health Struct --- // This is an internal detail to track the state of each node. #[derive(Debug)] @@ -118,25 +120,46 @@ pub struct MultiNodeConfiguration { /// The timeout for each individual network request. pub connection_timeout: Duration, } +impl Default for MultiNodeConfiguration { + /// Provides a default configuration suitable for local development. + /// + /// - **nodes**: Empty. + /// - **nearest_node**: None. + /// - **api_key**: "xyz" (a common placeholder). + /// - **healthcheck_interval**: 60 seconds.
+ /// - **retry_policy**: Exponential backoff with a maximum of 3 retries. + /// - **connection_timeout**: 5 seconds. + fn default() -> Self { + Self { + nodes: vec![], + nearest_node: None, + api_key: "xyz".to_string(), + healthcheck_interval: Duration::from_secs(60), + retry_policy: ExponentialBackoff::builder().build_with_max_retries(3), + connection_timeout: Duration::from_secs(5), + } + } +} /// The primary error type for the Typesense client. #[derive(Debug, Error)] pub enum Error where - E: std::fmt::Debug, - apis::Error: std::fmt::Display + std::fmt::Debug, + E: std::fmt::Debug + 'static, + apis::Error: std::error::Error + 'static, { /// Indicates that all configured nodes failed to process a request. - /// The source contains the last error received. - #[error("All API nodes failed to respond.")] - AllNodesFailed(#[source] Box>), - - /// A network-level error occurred within the `reqwest` middleware stack (e.g., a connection timeout). - #[error("A single node failed with a middleware error")] - Middleware(#[from] reqwest_middleware::Error), - - /// An API-level error returned by the Typesense server (e.g., 404 Not Found, 400 Bad Request). - #[error("A single node failed with an API error")] + #[error("All API nodes failed to respond. Last error: {source}")] + AllNodesFailed { + /// The last underlying API or network error received from a node before giving up. + #[source] + source: apis::Error, + }, + + // Any middleware error will be wrapped in the Api variant below. + /// An API-level error returned by the Typesense server (e.g., 503 Service Unavailable) + /// or a network-level error from the underlying HTTP client (e.g. connection refused). + #[error("A single node failed with an API or network error")] Api(#[from] apis::Error), } @@ -248,10 +271,10 @@ impl Client { where F: Fn(Arc) -> Fut, Fut: Future>>, - E: std::fmt::Debug, - apis::Error: std::fmt::Display + std::fmt::Debug, + E: std::fmt::Debug + 'static, + apis::Error: std::error::Error + 'static, { - let mut last_error: Option> = None; + let mut last_api_error: Option> = None; let num_nodes_to_try = self.nodes.len() + self.nearest_node.is_some() as usize; // Loop up to the total number of available nodes. @@ -296,62 +319,228 @@ impl Client { return Ok(response); } Err(e) => { - let wrapped_error: Error = e.into(); - if is_retriable(&wrapped_error) { + if is_retriable(&e) { self.set_node_health(&node_arc, false); // Mark as unhealthy on retriable error. - last_error = Some(wrapped_error); + last_api_error = Some(e); // Continue loop to try the next node. } else { // Non-retriable error (e.g., 404 Not Found), fail fast. - return Err(wrapped_error); + return Err(e.into()); } } } } // If the loop finishes, all nodes have failed. - Err(Error::AllNodesFailed(Box::new(last_error.expect( - "No nodes were available to try, or all errors were non-retriable.", - )))) + Err(Error::AllNodesFailed { + source: last_api_error + .expect("No nodes were available to try, or all errors were non-retriable."), + }) } - /// Provides access to the collection aliases-related API endpoints. 
+ /// + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let all_aliases = client.aliases().retrieve().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn aliases(&self) -> Aliases<'_> { Aliases::new(self) } /// Provides access to a specific collection alias's-related API endpoints. + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let specific_alias = client.alias("books_alias").retrieve().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn alias<'a>(&'a self, name: &'a str) -> Alias<'a> { Alias::new(self, name) } + /// Provides access to API endpoints for managing collections like `create()` and `retrieve()`. + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let all_collections = client.collections().retrieve().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn collections(&self) -> collections::Collections<'_> { collections::Collections::new(self) } /// Provides access to API endpoints of a specific collection. + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let my_collection = client.collection("products").retrieve().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn collection<'a>(&'a self, collection_name: &'a str) -> Collection<'a> { Collection::new(self, collection_name) } /// Provides access to the analytics-related API endpoints. 
+ /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let analytics_rules = client.analytics().rules().retrieve().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn analytics(&self) -> Analytics<'_> { Analytics::new(self) } /// Returns a `Conversations` instance for managing conversation models. + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let conversation = client.conversations().models().retrieve().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn conversations(&self) -> Conversations { Conversations::new(self) } /// Provides access to top-level, non-namespaced API endpoints like `health` and `debug`. + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let health = client.operations().health().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn operations(&self) -> Operations<'_> { Operations::new(self) } /// Provides access to endpoints for managing the collection of API keys. /// - /// Example: `client.keys().create(schema).await` + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// # let schema = models::ApiKeySchema { + /// # description: "Search-only key.".to_string(), + /// # actions: vec!["documents:search".to_string()], + /// # collections: vec!["*".to_string()], + /// # ..Default::default() + /// # }; + /// let new_key = client.keys().create(schema).await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn keys(&self) -> Keys<'_> { Keys::new(self) } @@ -361,7 +550,26 @@ impl Client { /// # Arguments /// * `key_id` - The ID of the key to manage. 
/// - /// Example: `client.key(123).delete().await` + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let deleted_key = client.key(123).delete().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn key(&self, key_id: i64) -> Key<'_> { Key::new(self, key_id) } @@ -369,8 +577,24 @@ impl Client { /// Provides access to endpoints for managing all of your presets. /// /// # Example - /// ``` - /// client.presets().list().await?; + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let list_of_presets = client.presets().retrieve().await.unwrap(); + /// # Ok(()) + /// # } /// ``` pub fn presets(&self) -> Presets { Presets::new(self) @@ -382,8 +606,24 @@ impl Client { /// * `preset_id` - The ID of the preset to manage. /// /// # Example - /// ``` - /// client.preset("my-preset").retrieve().await?; + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let preset = client.preset("my-preset").retrieve().await.unwrap(); + /// # Ok(()) + /// # } /// ``` pub fn preset<'a>(&'a self, preset_id: &'a str) -> Preset<'a> { Preset::new(self, preset_id) @@ -393,18 +633,51 @@ impl Client { /// /// # Example /// - /// ```no_run - /// client.stemming().dictionaries().retrieve().await?; + /// ``` + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # + /// # let client = Client::new(config)?; + /// let response = client.stemming().dictionaries().retrieve().await.unwrap(); + /// # println!("{:#?}", response); + /// # Ok(()) + /// # } /// ``` pub fn stemming(&self) -> Stemming { Stemming::new(self) } - // --- Stopwords Accessors --- - /// Provides access to endpoints for managing the collection of stopwords sets. 
/// - /// Example: `client.stopwords().retrieve().await` + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let all_stopwords = client.stopwords().retrieve().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn stopwords(&self) -> Stopwords<'_> { Stopwords::new(self) } @@ -414,29 +687,79 @@ impl Client { /// # Arguments /// * `set_id` - The ID of the stopwords set to manage. /// - /// Example: `client.stopword("common_words").retrieve().await` + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let my_stopword_set = client.stopword("common_words").retrieve().await.unwrap(); + /// # Ok(()) + /// # } + /// ``` pub fn stopword<'a>(&'a self, set_id: &'a str) -> Stopword<'a> { Stopword::new(self, set_id) } + + /// Provides access to the multi search endpoint. + /// + /// # Example + /// ```ignore + /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense_codegen::models; + /// # use reqwest::Url; + /// # use reqwest_retry::policies::ExponentialBackoff; + /// # use std::time::Duration; + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// # let search_requests = models::MultiSearchSearchesParameter { + /// # searches: vec![models::MultiSearchCollectionParameters { + /// # collection: Some("products".to_string()), + /// # q: Some("phone".to_string()), + /// # query_by: Some("name".to_string()), + /// # ..Default::default() + /// # }], + /// # ..Default::default() + /// # }; + /// # let common_params = models::MultiSearchParameters::default(); + /// let results = client.multi_search().perform(search_requests, common_params).await.unwrap(); + /// # Ok(()) + /// # } + /// ``` + pub fn multi_search(&self) -> MultiSearch<'_> { + MultiSearch::new(self) + } } /// A helper function to determine if an error is worth retrying on another node. -fn is_retriable(error: &Error) -> bool +fn is_retriable(error: &apis::Error) -> bool where - E: std::fmt::Debug, - apis::Error: std::fmt::Display + std::fmt::Debug, + E: std::fmt::Debug + 'static, + apis::Error: std::error::Error + 'static, { match error { + // Server-side errors (5xx) indicate a problem with the node, so we should try another. + apis::Error::ResponseError(content) => content.status.is_server_error(), + // Underlying reqwest errors (e.g. connection refused) are retriable. // Network-level errors from middleware are always retriable. 
- Error::Middleware(_) => true, - Error::Api(api_err) => match api_err { - // Server-side errors (5xx) indicate a problem with the node, so we should try another. - apis::Error::ResponseError(content) => content.status.is_server_error(), - // Underlying reqwest errors (e.g. connection refused) are retriable. - apis::Error::Reqwest(_) => true, - // Client-side (4xx) or parsing errors are not retriable as the request is likely invalid. - _ => false, - }, - Error::AllNodesFailed(_) => false, + apis::Error::Reqwest(_) | apis::Error::ReqwestMiddleware(_) => true, + // Client-side (4xx) or parsing errors are not retriable as the request is likely invalid. + _ => false, } } diff --git a/typesense/src/client/multi_search.rs b/typesense/src/client/multi_search.rs index 33999d4..54e3f1a 100644 --- a/typesense/src/client/multi_search.rs +++ b/typesense/src/client/multi_search.rs @@ -20,6 +20,11 @@ pub struct MultiSearch<'a> { } impl<'a> MultiSearch<'a> { + /// Creates a new `MultiSearch` instance. + pub(super) fn new(client: &'a Client) -> Self { + Self { client } + } + /// Make multiple search requests in a single HTTP request to avoid round-trip network latencies. /// /// You can use it in two different modes: diff --git a/typesense/src/client/stemming/dictionaries.rs b/typesense/src/client/stemming/dictionaries.rs index 8ea1c40..480e252 100644 --- a/typesense/src/client/stemming/dictionaries.rs +++ b/typesense/src/client/stemming/dictionaries.rs @@ -47,7 +47,7 @@ impl<'a> Dictionaries<'a> { } /// Retrieves a list of all available stemming dictionaries. - pub async fn list( + pub async fn retrieve( &self, ) -> Result< models::ListStemmingDictionaries200Response, diff --git a/typesense/src/client/stemming/dictionary.rs b/typesense/src/client/stemming/dictionary.rs index 8995e2d..223c029 100644 --- a/typesense/src/client/stemming/dictionary.rs +++ b/typesense/src/client/stemming/dictionary.rs @@ -27,7 +27,7 @@ impl<'a> Dictionary<'a> { } /// Retrieves the details of this specific stemming dictionary. - pub async fn get( + pub async fn retrieve( &self, ) -> Result> { let params = stemming_api::GetStemmingDictionaryParams { @@ -40,20 +40,4 @@ impl<'a> Dictionary<'a> { }) .await } - - // Deletes this specific stemming dictionary. 
- // pub async fn delete( - // &self, - // ) -> Result> - // { - // let params = stemming_api::DeleteStemmingDictionaryParams { - // dictionary_id: self.dictionary_id.to_string(), - // }; - // self.client - // .execute(|config: Arc| { - // let params_for_move = params.clone(); - // async move { stemming_api::delete_stemming_dictionary(&config, params_for_move).await } - // }) - // .await - // } } diff --git a/typesense/tests/api/collection.rs b/typesense/tests/api/collection.rs index d11bfb0..ecb4b2f 100644 --- a/typesense/tests/api/collection.rs +++ b/typesense/tests/api/collection.rs @@ -1,136 +1,136 @@ -#![allow(dead_code)] - -use super::Config; -use serde::{Deserialize, Serialize}; -use typesense::document::Document; -use typesense::Typesense; -use typesense_codegen::apis::collections_api; -use typesense_codegen::models::{CollectionResponse, CollectionSchema}; - -#[derive(Typesense, Serialize, Deserialize)] -#[typesense(collection_name = "companies", default_sorting_field = "num_employees")] -struct Company { - company_name: String, - num_employees: i32, - #[typesense(facet)] - country: String, -} - -fn schema_to_resp(schema: CollectionSchema, resp: &CollectionResponse) -> CollectionResponse { - CollectionResponse { - name: schema.name, - fields: schema.fields, - default_sorting_field: schema.default_sorting_field, - token_separators: schema.token_separators, - enable_nested_fields: schema.enable_nested_fields, - symbols_to_index: schema.symbols_to_index, - num_documents: resp.num_documents, - created_at: resp.created_at, - } -} - -async fn create_collection() { - let collection_schema_response = - collections_api::create_collection(Config::get(), Company::collection_schema()) - .await - .unwrap(); - - assert_eq!(collection_schema_response.num_documents, 0); - assert_eq!( - schema_to_resp(Company::collection_schema(), &collection_schema_response), - collection_schema_response - ); -} - -async fn get_collection() { - let collection_schema_response = collections_api::get_collection(Config::get(), "companies") - .await - .unwrap(); - - assert_eq!(collection_schema_response.num_documents, 1250); - assert_eq!( - schema_to_resp(Company::collection_schema(), &collection_schema_response), - collection_schema_response - ); -} - -async fn delete_collection() { - let collection_schema_response = collections_api::delete_collection(Config::get(), "companies") - .await - .unwrap(); - - assert_eq!(collection_schema_response.num_documents, 1200); - assert_eq!( - schema_to_resp(Company::collection_schema(), &collection_schema_response), - collection_schema_response - ); -} - -async fn get_collections() { - let collection_schema_response = collections_api::get_collections(Config::get()) - .await - .unwrap(); - - assert_eq!(collection_schema_response.len(), 2); -} - -#[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] -mod tokio_test { - use super::*; - - #[tokio::test] - async fn create_collection_tokio() { - create_collection().await - } - - #[tokio::test] - async fn get_collection_tokio() { - get_collection().await - } - - #[tokio::test] - async fn delete_collection_tokio() { - delete_collection().await - } - - #[tokio::test] - async fn get_collections_tokio() { - get_collections().await - } -} - -#[cfg(target_arch = "wasm32")] -mod wasm_test { - use super::*; - use wasm_bindgen_test::wasm_bindgen_test; - - wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); - - #[wasm_bindgen_test] - async fn create_collection_wasm() { - console_error_panic_hook::set_once(); - - 
create_collection().await - } - - #[wasm_bindgen_test] - async fn get_collection_wasm() { - console_error_panic_hook::set_once(); - - get_collection().await - } - - #[wasm_bindgen_test] - async fn delete_collection_wasm() { - console_error_panic_hook::set_once(); - - delete_collection().await - } - - #[wasm_bindgen_test] - async fn get_collections_wasm() { - console_error_panic_hook::set_once(); - - get_collections().await - } -} +// #![allow(dead_code)] + +// use super::Config; +// use serde::{Deserialize, Serialize}; +// use typesense::document::Document; +// use typesense::Typesense; +// use typesense_codegen::apis::collections_api; +// use typesense_codegen::models::{CollectionResponse, CollectionSchema}; + +// #[derive(Typesense, Serialize, Deserialize)] +// #[typesense(collection_name = "companies", default_sorting_field = "num_employees")] +// struct Company { +// company_name: String, +// num_employees: i32, +// #[typesense(facet)] +// country: String, +// } + +// fn schema_to_resp(schema: CollectionSchema, resp: &CollectionResponse) -> CollectionResponse { +// CollectionResponse { +// name: schema.name, +// fields: schema.fields, +// default_sorting_field: schema.default_sorting_field, +// token_separators: schema.token_separators, +// enable_nested_fields: schema.enable_nested_fields, +// symbols_to_index: schema.symbols_to_index, +// num_documents: resp.num_documents, +// created_at: resp.created_at, +// } +// } + +// async fn create_collection() { +// let collection_schema_response = +// collections_api::create_collection(Config::get(), Company::collection_schema()) +// .await +// .unwrap(); + +// assert_eq!(collection_schema_response.num_documents, 0); +// assert_eq!( +// schema_to_resp(Company::collection_schema(), &collection_schema_response), +// collection_schema_response +// ); +// } + +// async fn get_collection() { +// let collection_schema_response = collections_api::get_collection(Config::get(), "companies") +// .await +// .unwrap(); + +// assert_eq!(collection_schema_response.num_documents, 1250); +// assert_eq!( +// schema_to_resp(Company::collection_schema(), &collection_schema_response), +// collection_schema_response +// ); +// } + +// async fn delete_collection() { +// let collection_schema_response = collections_api::delete_collection(Config::get(), "companies") +// .await +// .unwrap(); + +// assert_eq!(collection_schema_response.num_documents, 1200); +// assert_eq!( +// schema_to_resp(Company::collection_schema(), &collection_schema_response), +// collection_schema_response +// ); +// } + +// async fn get_collections() { +// let collection_schema_response = collections_api::get_collections(Config::get()) +// .await +// .unwrap(); + +// assert_eq!(collection_schema_response.len(), 2); +// } + +// #[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] +// mod tokio_test { +// use super::*; + +// #[tokio::test] +// async fn create_collection_tokio() { +// create_collection().await +// } + +// #[tokio::test] +// async fn get_collection_tokio() { +// get_collection().await +// } + +// #[tokio::test] +// async fn delete_collection_tokio() { +// delete_collection().await +// } + +// #[tokio::test] +// async fn get_collections_tokio() { +// get_collections().await +// } +// } + +// #[cfg(target_arch = "wasm32")] +// mod wasm_test { +// use super::*; +// use wasm_bindgen_test::wasm_bindgen_test; + +// wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + +// #[wasm_bindgen_test] +// async fn create_collection_wasm() { +// 
console_error_panic_hook::set_once(); + +// create_collection().await +// } + +// #[wasm_bindgen_test] +// async fn get_collection_wasm() { +// console_error_panic_hook::set_once(); + +// get_collection().await +// } + +// #[wasm_bindgen_test] +// async fn delete_collection_wasm() { +// console_error_panic_hook::set_once(); + +// delete_collection().await +// } + +// #[wasm_bindgen_test] +// async fn get_collections_wasm() { +// console_error_panic_hook::set_once(); + +// get_collections().await +// } +// } diff --git a/typesense/tests/api/documents.rs b/typesense/tests/api/documents.rs index 885d56d..c8e8f5b 100644 --- a/typesense/tests/api/documents.rs +++ b/typesense/tests/api/documents.rs @@ -1,111 +1,111 @@ -#![allow(dead_code)] - -use super::Config; -use serde::{Deserialize, Serialize}; -use typesense::document::Document; -use typesense::models::SearchParameters; -use typesense::Typesense; -use typesense_codegen::apis::documents_api; - -#[derive(Typesense, Serialize, Deserialize)] -#[typesense(collection_name = "companies", default_sorting_field = "num_employees")] -struct Company { - company_name: String, - num_employees: i32, - #[typesense(facet)] - country: String, -} - -async fn import_documents() { - let documents = [ - Company { - company_name: "test".to_owned(), - num_employees: 1, - country: "c1".to_owned(), - }, - Company { - company_name: "test2".to_owned(), - num_employees: 2, - country: "c2".to_owned(), - }, - ] - .map(|c| serde_json::to_string(&c).unwrap()) - .join("\n"); - - let resp = documents_api::import_documents( - Config::get(), - &Company::collection_schema().name, - documents, - None, - ) - .await - .unwrap(); - - assert_eq!(&resp, "{\"success\":true}\n{\"success\":true}"); -} - -async fn search_collection() { - let search = SearchParameters { - q: "test".to_owned(), - query_by: "company_name".to_owned(), - ..Default::default() - }; - - let resp = documents_api::search_collection::( - Config::get(), - &Company::collection_schema().name, - search, - ) - .await - .unwrap(); - - assert_eq!(resp.found, Some(2)); - assert_eq!( - resp.hits - .unwrap() - .first() - .unwrap() - .document - .as_ref() - .unwrap() - .company_name, - "test".to_owned() - ); -} - -#[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] -mod tokio_test { - use super::*; - - #[tokio::test] - async fn import_documents_tokio() { - import_documents().await - } - - #[tokio::test] - async fn search_collection_tokio() { - search_collection().await - } -} - -#[cfg(target_arch = "wasm32")] -mod wasm_test { - use super::*; - use wasm_bindgen_test::wasm_bindgen_test; - - wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); - - #[wasm_bindgen_test] - async fn import_documents_wasm() { - console_error_panic_hook::set_once(); - - import_documents().await - } - - #[wasm_bindgen_test] - async fn search_collection_wasm() { - console_error_panic_hook::set_once(); - - search_collection().await - } -} +// #![allow(dead_code)] + +// use super::Config; +// use serde::{Deserialize, Serialize}; +// use typesense::document::Document; +// use typesense::models::SearchParameters; +// use typesense::Typesense; +// use typesense_codegen::apis::documents_api; + +// #[derive(Typesense, Serialize, Deserialize)] +// #[typesense(collection_name = "companies", default_sorting_field = "num_employees")] +// struct Company { +// company_name: String, +// num_employees: i32, +// #[typesense(facet)] +// country: String, +// } + +// async fn import_documents() { +// let documents = [ +// Company { +// 
company_name: "test".to_owned(), +// num_employees: 1, +// country: "c1".to_owned(), +// }, +// Company { +// company_name: "test2".to_owned(), +// num_employees: 2, +// country: "c2".to_owned(), +// }, +// ] +// .map(|c| serde_json::to_string(&c).unwrap()) +// .join("\n"); + +// let resp = documents_api::import_documents( +// Config::get(), +// &Company::collection_schema().name, +// documents, +// None, +// ) +// .await +// .unwrap(); + +// assert_eq!(&resp, "{\"success\":true}\n{\"success\":true}"); +// } + +// async fn search_collection() { +// let search = SearchParameters { +// q: "test".to_owned(), +// query_by: "company_name".to_owned(), +// ..Default::default() +// }; + +// let resp = documents_api::search_collection::( +// Config::get(), +// &Company::collection_schema().name, +// search, +// ) +// .await +// .unwrap(); + +// assert_eq!(resp.found, Some(2)); +// assert_eq!( +// resp.hits +// .unwrap() +// .first() +// .unwrap() +// .document +// .as_ref() +// .unwrap() +// .company_name, +// "test".to_owned() +// ); +// } + +// #[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] +// mod tokio_test { +// use super::*; + +// #[tokio::test] +// async fn import_documents_tokio() { +// import_documents().await +// } + +// #[tokio::test] +// async fn search_collection_tokio() { +// search_collection().await +// } +// } + +// #[cfg(target_arch = "wasm32")] +// mod wasm_test { +// use super::*; +// use wasm_bindgen_test::wasm_bindgen_test; + +// wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + +// #[wasm_bindgen_test] +// async fn import_documents_wasm() { +// console_error_panic_hook::set_once(); + +// import_documents().await +// } + +// #[wasm_bindgen_test] +// async fn search_collection_wasm() { +// console_error_panic_hook::set_once(); + +// search_collection().await +// } +// } diff --git a/typesense/tests/api/lib.rs b/typesense/tests/api/lib.rs index 94a3985..b36f5a3 100644 --- a/typesense/tests/api/lib.rs +++ b/typesense/tests/api/lib.rs @@ -1,41 +1,41 @@ -use std::sync::OnceLock; -use typesense_codegen::apis::configuration::{ApiKey, Configuration}; - -mod collection; -mod documents; - -static CONFIG: OnceLock = OnceLock::new(); - -#[cfg(not(target_arch = "wasm32"))] -fn init() -> Configuration { - let _ = dotenvy::dotenv(); - - let base_path = std::env::var("URL").expect("URL must be present in .env"); - let key = std::env::var("API_KEY").expect("API_KEY must be present in .env"); - - Configuration { - base_path, - api_key: Some(ApiKey { prefix: None, key }), - ..Default::default() - } -} - -#[cfg(target_arch = "wasm32")] -fn init() -> Configuration { - let base_path = "http://localhost:5000".to_owned(); - let key = "VerySecretKey".to_owned(); - - Configuration { - base_path, - api_key: Some(ApiKey { prefix: None, key }), - ..Default::default() - } -} - -pub struct Config; - -impl Config { - pub fn get() -> &'static Configuration { - CONFIG.get_or_init(init) - } -} +// use std::sync::OnceLock; +// use typesense_codegen::apis::configuration::{ApiKey, Configuration}; + +// mod collection; +// mod documents; + +// static CONFIG: OnceLock = OnceLock::new(); + +// #[cfg(not(target_arch = "wasm32"))] +// fn init() -> Configuration { +// let _ = dotenvy::dotenv(); + +// let base_path = std::env::var("URL").expect("URL must be present in .env"); +// let key = std::env::var("API_KEY").expect("API_KEY must be present in .env"); + +// Configuration { +// base_path, +// api_key: Some(ApiKey { prefix: None, key }), +// ..Default::default() +// } +// } + +// 
#[cfg(target_arch = "wasm32")] +// fn init() -> Configuration { +// let base_path = "http://localhost:5000".to_owned(); +// let key = "VerySecretKey".to_owned(); + +// Configuration { +// base_path, +// api_key: Some(ApiKey { prefix: None, key }), +// ..Default::default() +// } +// } + +// pub struct Config; + +// impl Config { +// pub fn get() -> &'static Configuration { +// CONFIG.get_or_init(init) +// } +// } diff --git a/typesense/tests/client/aliases_test.rs b/typesense/tests/client/aliases_test.rs new file mode 100644 index 0000000..4a3c6b9 --- /dev/null +++ b/typesense/tests/client/aliases_test.rs @@ -0,0 +1,92 @@ +use typesense_codegen::models::{CollectionAliasSchema, CollectionSchema, Field}; + +use super::{get_client, new_id}; + +#[tokio::test] +async fn test_aliases_and_alias_lifecycle() { + let client = get_client(); + let collection_name = new_id("products"); + let alias_name = new_id("products_alias"); + + // --- 1. Create a collection to alias to --- + let collection_schema = CollectionSchema { + name: collection_name.clone(), + fields: vec![Field { + name: "name".to_string(), + r#type: "string".to_string(), + ..Default::default() + }], + ..Default::default() + }; + + let create_collection_result = client.collections().create(collection_schema).await; + assert!( + create_collection_result.is_ok(), + "Failed to create collection for alias test" + ); + + // --- 2. Create (Upsert) an alias --- + let alias_schema = CollectionAliasSchema { + collection_name: collection_name.clone(), + }; + + let upsert_result = client.aliases().upsert(&alias_name, alias_schema).await; + assert!(upsert_result.is_ok(), "Failed to create alias"); + let created_alias = upsert_result.unwrap(); + assert_eq!(created_alias.name, alias_name); + assert_eq!(created_alias.collection_name, collection_name); + + // --- 3. Retrieve the specific alias by name --- + let retrieve_one_result = client.alias(&alias_name).retrieve().await; + assert!( + retrieve_one_result.is_ok(), + "Failed to retrieve the specific alias." + ); + let retrieved_alias = retrieve_one_result.unwrap(); + assert_eq!(retrieved_alias.name, alias_name); + assert_eq!(retrieved_alias.collection_name, collection_name); + + // --- 4. Retrieve all aliases --- + let retrieve_all_result = client.aliases().retrieve().await; + assert!( + retrieve_all_result.is_ok(), + "Failed to retrieve the list of aliases." + ); + let all_aliases_response = retrieve_all_result.unwrap(); + + // --- 5. Find our specific alias within the list --- + let our_alias = all_aliases_response + .aliases + .iter() + .find(|a| a.name == alias_name); + + assert!( + our_alias.is_some(), + "The newly created alias was not found in the list." + ); + + if let Some(alias) = our_alias { + assert_eq!(alias.name, alias_name); + assert_eq!(alias.collection_name, collection_name); + } + + // --- 6. Delete the alias --- + let delete_result = client.alias(&alias_name).delete().await; + assert!(delete_result.is_ok(), "Failed to delete alias"); + let deleted_alias = delete_result.unwrap(); + assert_eq!(deleted_alias.name, alias_name); + + // --- 7. Verify Deletion --- + let get_after_delete_result = client.alias(&alias_name).retrieve().await; + assert!( + get_after_delete_result.is_err(), + "Alias should not exist after deletion" + ); + + // --- 8. 
Clean up the collection --- + let delete_collection_result = client.collection(&collection_name).delete().await; + assert!( + delete_collection_result.is_ok(), + "Failed to delete collection after alias test" + ); +} diff --git a/typesense/tests/client/analytics_test.rs b/typesense/tests/client/analytics_test.rs new file mode 100644 index 0000000..4d14fdd --- /dev/null +++ b/typesense/tests/client/analytics_test.rs @@ -0,0 +1,123 @@ +use super::{get_client, new_id}; +use serde_json::json; +use typesense::models::analytics_rule_schema::Type::Counter; +use typesense::models::{ + self, AnalyticsEventCreateSchema, AnalyticsRuleParametersDestination, + AnalyticsRuleParametersSource, AnalyticsRuleParametersSourceEventsInner, AnalyticsRuleSchema, +}; +use typesense_codegen::models::{CollectionSchema, Field}; + +#[tokio::test] +async fn test_analytics_rules_and_events_lifecycle() { + let client = get_client(); + let rule_name_1 = new_id("product_clicks"); + let collection_name = new_id("products"); + let event_name = "products_click_event"; + + // --- 1. Create a Collection (via `collections`) --- + let schema = CollectionSchema { + name: collection_name.clone(), + fields: vec![ + Field { + name: "title".to_string(), + r#type: "string".to_string(), + ..Default::default() + }, + Field { + name: "popularity".to_string(), + r#type: "int32".to_string(), + optional: Some(true), + ..Default::default() + }, + ], + ..Default::default() + }; + + let create_result = client.collections().create(schema).await; + assert!(create_result.is_ok(), "Failed to create collection"); + let created_collection = create_result.unwrap(); + assert_eq!(created_collection.name, collection_name); + + // --- 2. Create a Rule (via `rules.create`) --- + let create_schema = AnalyticsRuleSchema { + name: rule_name_1.clone(), + r#type: Counter, + params: Box::new(models::AnalyticsRuleParameters { + source: Box::new(AnalyticsRuleParametersSource { + collections: vec!["products".to_string()], + events: Some(vec![AnalyticsRuleParametersSourceEventsInner { + r#type: "click".to_string(), + weight: 1.0, + name: event_name.to_owned(), + }]), + }), + destination: Box::new(AnalyticsRuleParametersDestination { + collection: "products".to_string(), + counter_field: Some("popularity".to_string()), + }), + ..Default::default() + }), + }; + + let create_result = client.analytics().rules().create(create_schema).await; + assert!(create_result.is_ok(), "Failed to create analytics rule"); + let created_rule = create_result.unwrap(); + assert_eq!(created_rule.name, rule_name_1); + assert_eq!(created_rule.r#type, Counter); + + // --- 3. Retrieve the specific Rule (via `rule`) --- + let retrieve_one_result = client.analytics().rule(&rule_name_1).retrieve().await; + assert!( + retrieve_one_result.is_ok(), + "Failed to retrieve the newly created rule." + ); + let retrieved_rule = retrieve_one_result.unwrap(); + assert_eq!(retrieved_rule.name, rule_name_1); + + // --- 4. Retrieve all Rules (via `rules`) --- + let retrieve_all_result = client.analytics().rules().retrieve().await; + assert!( + retrieve_all_result.is_ok(), + "Failed to retrieve the list of rules." + ); + let all_rules_response = retrieve_all_result.unwrap(); + assert!( + all_rules_response.rules.as_ref().unwrap().len() >= 1, + "Expected at least one rule to be present." + ); + assert!(all_rules_response + .rules + .unwrap() + .iter() + .any(|r| r.name == rule_name_1)); + + // --- 5. 
Sending click events (via `events`) --- + let event_result = client + .analytics() + .events() + .create(AnalyticsEventCreateSchema { + r#type: "click".to_string(), + name: event_name.to_owned(), + data: json!({ + "doc_id": "1024", + "user_id": "111112" + }), + }) + .await; + + assert!(event_result.is_ok(), "Failed to send the click event."); + assert!(event_result.unwrap().ok, "Unsuccessful click event."); + + // --- 6. Delete a Rule (via `rule`) --- + let delete_result = client.analytics().rule(&rule_name_1).delete().await; + assert!(delete_result.is_ok(), "Failed to delete rule"); + let deleted_response = delete_result.unwrap(); + assert_eq!(deleted_response.name, rule_name_1); + + // --- 7. Verify Deletion --- + let get_after_delete_result = client.analytics().rule(&rule_name_1).retrieve().await; + assert!( + get_after_delete_result.is_err(), + "Rule should not exist after deletion" + ); +} diff --git a/typesense/tests/client/client_test.rs b/typesense/tests/client/client_test.rs new file mode 100644 index 0000000..626d22c --- /dev/null +++ b/typesense/tests/client/client_test.rs @@ -0,0 +1,326 @@ +use reqwest::Url; +use reqwest_retry::policies::ExponentialBackoff; +use std::time::Duration; +use typesense::client::*; +use typesense::models::CollectionResponse; +use wiremock::matchers::{header, method, path}; +use wiremock::{Mock, MockServer, ResponseTemplate}; + +// Helper to create a mock Typesense server for a successful collection retrieval. +async fn setup_mock_server_ok(server: &MockServer, collection_name: &str) { + let response_body = CollectionResponse { + name: collection_name.to_string(), + ..Default::default() + }; + + Mock::given(method("GET")) + .and(path(format!("/collections/{}", collection_name))) + .and(header("X-TYPESENSE-API-KEY", "test-key")) + .respond_with(ResponseTemplate::new(200).set_body_json(response_body)) + .mount(server) + .await; +} + +// Helper to create a mock Typesense server that returns a server error. +async fn setup_mock_server_503(server: &MockServer, collection_name: &str) { + Mock::given(method("GET")) + .and(path(format!("/collections/{}", collection_name))) + .respond_with(ResponseTemplate::new(503)) + .mount(server) + .await; +} + +// Helper to create a mock Typesense server that returns a 404 Not Found error. +async fn setup_mock_server_404(server: &MockServer, collection_name: &str) { + Mock::given(method("GET")) + .and(path(format!("/collections/{}", collection_name))) + .respond_with(ResponseTemplate::new(404)) + .mount(server) + .await; +} + +// Helper function to create a client configuration for tests. +fn get_test_config(nodes: Vec, nearest_node: Option) -> MultiNodeConfiguration { + MultiNodeConfiguration { + nodes, + nearest_node, + api_key: "test-key".to_string(), + healthcheck_interval: Duration::from_secs(60), + retry_policy: ExponentialBackoff::builder().build_with_max_retries(0), + connection_timeout: Duration::from_secs(1), + } +} + +#[tokio::test] +async fn test_success_on_first_node() { + let server1 = MockServer::start().await; + setup_mock_server_ok(&server1, "products").await; + + let config = get_test_config(vec![Url::parse(&server1.uri()).unwrap()], None); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + + assert!(result.is_ok()); + assert_eq!(result.unwrap().name, "products"); + // Check that the server received exactly one request. 
+ assert_eq!(server1.received_requests().await.unwrap().len(), 1); +} + +#[tokio::test] +async fn test_failover_to_second_node() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + setup_mock_server_503(&server1, "products").await; + setup_mock_server_ok(&server2, "products").await; + + let config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + ], + None, + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_ok()); + + // The first server should have been tried and failed. + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + // The second server should have been tried and succeeded. + assert_eq!(server2.received_requests().await.unwrap().len(), 1); +} + +#[tokio::test] +async fn test_nearest_node_is_prioritized() { + let nearest_server = MockServer::start().await; + let regular_server = MockServer::start().await; + setup_mock_server_ok(&nearest_server, "products").await; + setup_mock_server_ok(®ular_server, "products").await; + + let config = get_test_config( + vec![Url::parse(®ular_server.uri()).unwrap()], + Some(Url::parse(&nearest_server.uri()).unwrap()), + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_ok()); + + // Only the nearest node should have received a request. + assert_eq!(nearest_server.received_requests().await.unwrap().len(), 1); + assert_eq!(regular_server.received_requests().await.unwrap().len(), 0); +} + +#[tokio::test] +async fn test_failover_from_nearest_to_regular_node() { + let nearest_server = MockServer::start().await; + let regular_server = MockServer::start().await; + setup_mock_server_503(&nearest_server, "products").await; + setup_mock_server_ok(®ular_server, "products").await; + + let config = get_test_config( + vec![Url::parse(®ular_server.uri()).unwrap()], + Some(Url::parse(&nearest_server.uri()).unwrap()), + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_ok()); + + // Nearest node should have failed. + assert_eq!(nearest_server.received_requests().await.unwrap().len(), 1); + // Regular node should have succeeded. + assert_eq!(regular_server.received_requests().await.unwrap().len(), 1); +} + +#[tokio::test] +async fn test_round_robin_failover() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + let server3 = MockServer::start().await; + setup_mock_server_503(&server1, "products").await; + setup_mock_server_503(&server2, "products").await; + setup_mock_server_ok(&server3, "products").await; + + let config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + Url::parse(&server3.uri()).unwrap(), + ], + None, + ); + let client = Client::new(config).unwrap(); + + // First request should fail over to the third node + let result = client.collection("products").retrieve().await; + assert!(result.is_ok()); + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + assert_eq!(server2.received_requests().await.unwrap().len(), 1); + assert_eq!(server3.received_requests().await.unwrap().len(), 1); + + // The next request should start from the now-healthy 3rd node, but round-robin + // logic will have advanced the internal counter. Let's see it wrap around. 
+ // We expect the next attempt to be on server 3 again, then 1 (if 3 fails). + + // Reset server 3 to also fail + server3.reset().await; + setup_mock_server_503(&server3, "products").await; + // Make server 1 healthy again + server1.reset().await; + setup_mock_server_ok(&server1, "products").await; + + let result2 = client.collection("products").retrieve().await; + assert!(result2.is_ok()); + + // Server 3 was tried first and failed. + assert_eq!(server3.received_requests().await.unwrap().len(), 1); + // Server 1 was tried next and succeeded. + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + // Server 2 was not touched this time. + assert_eq!(server2.received_requests().await.unwrap().len(), 1); // Remains 1 from first call +} + +#[tokio::test] +async fn test_health_check_and_node_recovery() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + + setup_mock_server_503(&server1, "products").await; + setup_mock_server_ok(&server2, "products").await; + + let mut config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + ], + None, + ); + // Use a very short healthcheck interval for the test + config.healthcheck_interval = Duration::from_millis(500); + let client = Client::new(config).unwrap(); + + // 1. First request fails over to server2, marking server1 as unhealthy. + assert!(client.collection("products").retrieve().await.is_ok()); + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + assert_eq!(server2.received_requests().await.unwrap().len(), 1); + + // 2. Immediate second request should go directly to server2. + assert!(client.collection("products").retrieve().await.is_ok()); + assert_eq!(server1.received_requests().await.unwrap().len(), 1); // No new request + assert_eq!(server2.received_requests().await.unwrap().len(), 2); // Got another request + + // 3. Wait for the healthcheck interval to pass. + tokio::time::sleep(Duration::from_millis(600)).await; + + // 4. Make server1 healthy again. + server1.reset().await; + setup_mock_server_ok(&server1, "products").await; + + // 5. The next request should try server1 again (due to healthcheck expiry) and succeed. + assert!(client.collection("products").retrieve().await.is_ok()); + assert_eq!(server1.received_requests().await.unwrap().len(), 1); // Server 1 received its first successful req + assert_eq!(server2.received_requests().await.unwrap().len(), 2); // No new request for server 2 +} + +#[tokio::test] +async fn test_all_nodes_fail() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + setup_mock_server_503(&server1, "products").await; + setup_mock_server_503(&server2, "products").await; + + let config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + ], + None, + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_err()); + + match result.err().unwrap() { + Error::AllNodesFailed { .. } => { /* This is the expected outcome */ } + _ => panic!("Expected AllNodesFailed error"), + } + + // Both servers should have been tried. 
+ assert_eq!(server1.received_requests().await.unwrap().len(), 1); + assert_eq!(server2.received_requests().await.unwrap().len(), 1); +} + +#[tokio::test] +async fn test_fail_fast_on_non_retriable_error() { + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + + setup_mock_server_404(&server1, "products").await; + setup_mock_server_ok(&server2, "products").await; + + let config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + ], + None, + ); + let client = Client::new(config).unwrap(); + + let result = client.collection("products").retrieve().await; + assert!(result.is_err()); + + // Check that the error is the non-retriable API error. + match result.err().unwrap() { + Error::Api(typesense_codegen::apis::Error::ResponseError(content)) => { + assert_eq!(content.status, reqwest::StatusCode::NOT_FOUND); + } + e => panic!("Expected an API error, but got {:?}", e), + } + + // The first server should have been tried. + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + // The second server should NOT have been tried. + assert_eq!(server2.received_requests().await.unwrap().len(), 0); +} + +#[tokio::test] +async fn test_load_balancing_with_healthy_nodes() { + // 1. Setup three healthy mock servers + let server1 = MockServer::start().await; + let server2 = MockServer::start().await; + let server3 = MockServer::start().await; + setup_mock_server_ok(&server1, "products").await; + setup_mock_server_ok(&server2, "products").await; + setup_mock_server_ok(&server3, "products").await; + + // 2. Setup client with the three nodes + let config = get_test_config( + vec![ + Url::parse(&server1.uri()).unwrap(), + Url::parse(&server2.uri()).unwrap(), + Url::parse(&server3.uri()).unwrap(), + ], + None, + ); + let client = Client::new(config).unwrap(); + + // 3. Make three consecutive requests + let result1 = client.collection("products").retrieve().await; + let result2 = client.collection("products").retrieve().await; + let result3 = client.collection("products").retrieve().await; + + // 4. Assert all requests were successful + assert!(result1.is_ok()); + assert!(result2.is_ok()); + assert!(result3.is_ok()); + + // 5. Assert that each server received exactly one request, proving round-robin distribution + assert_eq!(server1.received_requests().await.unwrap().len(), 1); + assert_eq!(server2.received_requests().await.unwrap().len(), 1); + assert_eq!(server3.received_requests().await.unwrap().len(), 1); +} diff --git a/typesense/tests/client/collections_test.rs b/typesense/tests/client/collections_test.rs new file mode 100644 index 0000000..f327233 --- /dev/null +++ b/typesense/tests/client/collections_test.rs @@ -0,0 +1,133 @@ +use typesense_codegen::models::{CollectionSchema, CollectionUpdateSchema, Field}; + +use super::{get_client, new_id}; + +#[tokio::test] +async fn test_collections_and_collection_lifecycle() { + let client = get_client(); + let collection_name = new_id("products"); + + // --- 1. 
Create a Collection (via `collections`) --- + let schema = CollectionSchema { + name: collection_name.clone(), + fields: vec![ + Field { + name: "name".to_string(), + r#type: "string".to_string(), + ..Default::default() + }, + Field { + name: "price".to_string(), + r#type: "int32".to_string(), + ..Default::default() + }, + ], + ..Default::default() + }; + + let create_result = client.collections().create(schema).await; + assert!(create_result.is_ok(), "Failed to create collection"); + let created_collection = create_result.unwrap(); + assert_eq!(created_collection.name, collection_name); + + // --- 2. Retrieve the specific Collection (via `collection`) --- + let retrieve_one_result = client.collection(&collection_name).retrieve().await; + assert!( + retrieve_one_result.is_ok(), + "Failed to retrieve the newly created collection." + ); + let retrieved_collection = retrieve_one_result.unwrap(); + assert_eq!(retrieved_collection.name, collection_name); + assert_eq!(retrieved_collection.fields.len(), 2); + + // --- 3. Retrieve all collections (via `collections`) --- + let retrieve_all_result = client.collections().retrieve().await; + assert!( + retrieve_all_result.is_ok(), + "Failed to retrieve the list of collections." + ); + let all_collections = retrieve_all_result.unwrap(); + + // --- 4. Find our specific collection within the list --- + let our_collection = all_collections.iter().find(|c| c.name == collection_name); + assert!( + our_collection.is_some(), + "The newly created collection was not found in the list." + ); + + // --- 5. Update the Collection to add and drop a field (via `collection`) --- + let update_schema = CollectionUpdateSchema { + fields: vec![ + // Add a new field + Field { + name: "description".to_string(), + r#type: "string".to_string(), + optional: Some(true), + ..Default::default() + }, + // Drop an existing field + Field { + name: "price".to_string(), + drop: Some(true), + ..Default::default() + }, + ], + }; + + let update_result = client + .collection(&collection_name) + .update(update_schema) + .await; + assert!(update_result.is_ok(), "Failed to update collection"); + + // The update response contains the fields that were modified + let updated_fields_response = update_result.unwrap(); + assert_eq!( + updated_fields_response.fields.len(), + 2, + "The update response should contain the two modified fields." + ); + + // --- 6. Verify the update by retrieving the full schema again --- + let retrieve_after_update_result = client.collection(&collection_name).retrieve().await; + let retrieved_after_update = retrieve_after_update_result.unwrap(); + + // Initial fields: name, price. Update: +description, -price. Final fields: name, description. + assert_eq!( + retrieved_after_update.fields.len(), + 2, + "The number of fields should be 2 after the update." + ); + assert!( + retrieved_after_update + .fields + .iter() + .any(|f| f.name == "name"), + "The 'name' field should still exist." + ); + assert!( + retrieved_after_update + .fields + .iter() + .any(|f| f.name == "description"), + "The 'description' field should have been added." + ); + assert!( + !retrieved_after_update + .fields + .iter() + .any(|f| f.name == "price"), + "The 'price' field should have been dropped." + ); + + // --- 7. Delete the Collection (via `collection`) --- + let delete_result = client.collection(&collection_name).delete().await; + assert!(delete_result.is_ok(), "Failed to delete collection"); + + // --- 8. 
Verify Deletion --- + let get_after_delete_result = client.collection(&collection_name).retrieve().await; + assert!( + get_after_delete_result.is_err(), + "Collection should not exist after deletion" + ); +} diff --git a/typesense/tests/client/conversation_models_test.rs b/typesense/tests/client/conversation_models_test.rs new file mode 100644 index 0000000..b2d63af --- /dev/null +++ b/typesense/tests/client/conversation_models_test.rs @@ -0,0 +1,353 @@ +use std::time::Duration; + +use reqwest_retry::policies::ExponentialBackoff; +use typesense::{ + client::{Error as TypesenseError, MultiNodeConfiguration}, + models::ConversationModelUpdateSchema, +}; +use typesense_codegen::models::{CollectionSchema, ConversationModelCreateSchema, Field}; + +use super::{get_client, new_id}; + +#[tokio::test] +async fn test_create_model_with_invalid_key_fails_as_expected() { + let client = get_client(); + let model_id = new_id("gpt-4-invalid-key-test"); + let collection_name = new_id("conversation_store_invalid"); + + // --- 1. Setup: Create the prerequisite collection for history --- + let schema = CollectionSchema { + name: collection_name.clone(), + fields: vec![ + Field { + name: "conversation_id".to_string(), + r#type: "string".to_string(), + ..Default::default() + }, + Field { + name: "model_id".to_string(), + r#type: "string".to_string(), + ..Default::default() + }, + Field { + name: "timestamp".to_string(), + r#type: "int32".to_string(), + ..Default::default() + }, + Field { + name: "role".to_string(), + r#type: "string".to_string(), + index: Some(false), + ..Default::default() + }, + Field { + name: "message".to_string(), + r#type: "string".to_string(), + index: Some(false), + ..Default::default() + }, + ], + ..Default::default() + }; + let create_collection_result = client.collections().create(schema).await; + assert!( + create_collection_result.is_ok(), + "Setup failed: Could not create the collection needed for the test." + ); + + // --- 2. Action: Attempt to create a model with a deliberately invalid API key --- + let create_schema = ConversationModelCreateSchema { + id: Some(model_id.clone()), + model_name: "openai/gpt-4".to_string(), + api_key: Some("THIS_IS_AN_INVALID_KEY".to_string()), + history_collection: collection_name.clone(), + max_bytes: 10000, + ..Default::default() + }; + let create_result = client.conversations().models().create(create_schema).await; + + // --- 3. Assertion: Verify that the creation failed with the correct error --- + assert!( + create_result.is_err(), + "Model creation should have failed due to an invalid API key, but it succeeded." + ); + match create_result.err() { + Some(TypesenseError::Api(response_content)) => match response_content { + typesense::apis::Error::ResponseError(api_error) => { + assert_eq!( + api_error.status.as_u16(), + 400, + "Expected HTTP status code 400 for an invalid key." + ); + assert!( + api_error.content.contains("Incorrect API key provided"), + "The error message did not match the expected content. Got: {}", + api_error.content + ); + } + other_entity => { + panic!( + "Expected a Status400 error entity but got something else: {:?}", + other_entity + ); + } + }, + other_error => { + panic!( + "Expected a Typesense ResponseError, but got a different kind of error: {:?}", + other_error + ); + } + } + + // --- 4. 
Teardown: Clean up the collection created during setup --- + let delete_collection_result = client.collection(&collection_name).delete().await; + assert!( + delete_collection_result.is_ok(), + "Teardown failed: Could not delete the test collection." + ); +} + +use typesense::client::Client; +use wiremock::{ + matchers::{body_json, method, path}, + Mock, MockServer, ResponseTemplate, +}; + +// Helper to create a Typesense client configured for a mock server. +fn get_test_client(uri: &str) -> Client { + let config = MultiNodeConfiguration { + nodes: vec![uri.parse().unwrap()], + nearest_node: None, // Not needed for single-node tests + api_key: "TEST_API_KEY".to_string(), + // Keep other settings minimal for testing + healthcheck_interval: Duration::from_secs(60), + retry_policy: ExponentialBackoff::builder().build_with_max_retries(0), + connection_timeout: Duration::from_secs(1), + }; + Client::new(config).unwrap() +} + +#[tokio::test] +async fn test_create_model_with_wiremock() { + // --- 1. Setup: Start a mock server --- + let mock_server = MockServer::start().await; + + // --- 2. Setup: Configure the Typesense client to use the mock server's URI --- + let client = get_test_client(&mock_server.uri()); + + // --- 3. Setup: Define the request and the expected successful response --- + let model_id = new_id("conv-model-test"); + let collection_name = new_id("history-collection"); + + let create_schema = ConversationModelCreateSchema { + id: Some(model_id.clone()), + model_name: "openai/gpt-4".to_string(), + api_key: Some("A-FAKE-BUT-VALID-LOOKING-KEY".to_string()), + history_collection: collection_name.clone(), + system_prompt: Some("You are a helpful assistant.".to_string()), + ..Default::default() + }; + + // This is the successful JSON body we expect the mock server to return. + // It should match the structure of `ConversationModelSchema`. + let mock_response_body = serde_json::json!({ + "id": model_id, + "model_name": "openai/gpt-4", + "history_collection": collection_name, + "api_key": "sk-FA**********************************KEY", // Masked key + "system_prompt": "You are a helpful assistant.", + "max_bytes": 16384, + "ttl": 86400 + }); + + // --- 4. Setup: Define the mock server's behavior --- + Mock::given(method("POST")) + .and(path("/conversations/models")) + .and(body_json(&create_schema)) // Ensure the client sends the correct body + .respond_with(ResponseTemplate::new(200).set_body_json(mock_response_body.clone())) + .expect(1) // Expect this mock to be called exactly once + .mount(&mock_server) + .await; + + // --- 5. Action: Call the client method --- + let create_result = client.conversations().models().create(create_schema).await; + + // --- 6. Assertion: Verify the result --- + assert!( + create_result.is_ok(), + "The client should have successfully parsed the 200 response from the mock server. Error: {:?}", + create_result.err() + ); + + // Unwrap the successful result and check if its fields match the mocked response + let created_model = create_result.unwrap(); + assert_eq!(created_model.id, model_id); + assert_eq!(created_model.model_name, "openai/gpt-4"); + assert_eq!(created_model.history_collection, collection_name); + assert_eq!( + created_model.system_prompt, + Some("You are a helpful assistant.".to_string()) + ); +} + +#[tokio::test] +async fn test_retrieve_all_models_with_wiremock() { + // --- 1. 
Setup --- + let mock_server = MockServer::start().await; + let client = get_test_client(&mock_server.uri()); + + // The response body should be a Vec + let mock_response_body = serde_json::json!([ + { + "id": "model-1", + "model_name": "openai/gpt-3.5-turbo", + "history_collection": "conversation_store", + "api_key": "OPENAI_API_KEY", + "system_prompt": "Hey, you are an **intelligent** assistant for question-answering. You can only make conversations based on the provided context. If a response cannot be formed strictly using the provided context, politely say you do not have knowledge about that topic.", + "max_bytes": 16384 + }, + { + "id": "model-2", + "model_name": "openai/gpt-3.5-turbo", + "history_collection": "conversation_store", + "api_key": "OPENAI_API_KEY", + "system_prompt": "Hey, you are an **intelligent** assistant for question-answering. You can only make conversations based on the provided context. If a response cannot be formed strictly using the provided context, politely say you do not have knowledge about that topic.", + "max_bytes": 16384 + } + ]); + + // --- 2. Mocking --- + Mock::given(method("GET")) + .and(path("/conversations/models")) + .respond_with(ResponseTemplate::new(200).set_body_json(&mock_response_body)) + .expect(1) + .mount(&mock_server) + .await; + + // --- 3. Action --- + let retrieve_result = client.conversations().models().retrieve().await; + + // --- 4. Assertion --- + assert!(retrieve_result.is_ok(), "Retrieving all models failed"); + let models = retrieve_result.unwrap(); + assert_eq!(models.len(), 2); + assert_eq!(models[0].id, "model-1"); + assert_eq!(models[1].id, "model-2"); +} + +#[tokio::test] +async fn test_retrieve_single_model_with_wiremock() { + // --- 1. Setup --- + let mock_server = MockServer::start().await; + let client = get_test_client(&mock_server.uri()); + + let model_id = new_id("conv-model"); + let mock_response_body = serde_json::json!({ + "id": model_id, + "model_name": "openai/gpt-3.5-turbo", + "history_collection": "conversation_store", + "api_key": "OPENAI_API_KEY", + "system_prompt": "Hey, you are an **intelligent** assistant for question-answering. You can only make conversations based on the provided context. If a response cannot be formed strictly using the provided context, politely say you do not have knowledge about that topic.", + "max_bytes": 16384 + }); + + // --- 2. Mocking --- + Mock::given(method("GET")) + .and(path(format!("/conversations/models/{}", model_id))) + .respond_with(ResponseTemplate::new(200).set_body_json(&mock_response_body)) + .expect(1) + .mount(&mock_server) + .await; + + // --- 3. Action --- + let retrieve_result = client.conversations().model(&model_id).retrieve().await; + + // --- 4. Assertion --- + assert!(retrieve_result.is_ok()); + assert_eq!(retrieve_result.unwrap().id, model_id); +} + +#[tokio::test] +async fn test_update_single_model_with_wiremock() { + // --- 1. Setup --- + let mock_server = MockServer::start().await; + let client = get_test_client(&mock_server.uri()); + + let model_id = new_id("conv-model"); + + let update_schema = ConversationModelUpdateSchema { + system_prompt: Some("A new, updated prompt.".to_string()), + ..Default::default() + }; + + // The response body reflects the updated state of the resource + let mock_response_body = serde_json::json!({ + "id": model_id, + "model_name": "openai/gpt-3.5-turbo", + "history_collection": "conversation_store", + "api_key": "OPENAI_API_KEY", + "system_prompt": "A new, updated prompt.", + "max_bytes": 16384 + }); + + // --- 2. 
Mocking --- + Mock::given(method("PUT")) // As per docs, update uses PUT + .and(path(format!("/conversations/models/{}", model_id))) + .and(body_json(&update_schema)) // Verify the client sends the correct update payload + .respond_with(ResponseTemplate::new(200).set_body_json(&mock_response_body)) + .expect(1) + .mount(&mock_server) + .await; + + // --- 3. Action --- + let update_result = client + .conversations() + .model(&model_id) + .update(update_schema) + .await; + + // --- 4. Assertion --- + assert!(update_result.is_ok()); + let updated_model = update_result.unwrap(); + assert_eq!(updated_model.id, model_id); + assert_eq!( + updated_model.system_prompt.unwrap(), + "A new, updated prompt." + ); +} + +#[tokio::test] +async fn test_delete_single_model_with_wiremock() { + // --- 1. Setup --- + let mock_server = MockServer::start().await; + let client = get_test_client(&mock_server.uri()); + + let model_id = new_id("conv-model-to-delete"); + + // The API returns the object that was just deleted + let mock_response_body = serde_json::json!({ + "id": model_id, + "model_name": "openai/gpt-3.5-turbo", + "history_collection": "conversation_store", + "api_key": "OPENAI_API_KEY", + "system_prompt": "Hey, you are an **intelligent** assistant for question-answering. You can only make conversations based on the provided context. If a response cannot be formed strictly using the provided context, politely say you do not have knowledge about that topic.", + "max_bytes": 16384 + }); + + // --- 2. Mocking --- + Mock::given(method("DELETE")) + .and(path(format!("/conversations/models/{}", model_id))) + .respond_with(ResponseTemplate::new(200).set_body_json(&mock_response_body)) + .expect(1) + .mount(&mock_server) + .await; + + // --- 3. Action --- + let delete_result = client.conversations().model(&model_id).delete().await; + + // --- 4. Assertion --- + assert!(delete_result.is_ok()); + let deleted_model = delete_result.unwrap(); + assert_eq!(deleted_model.id, model_id); +} diff --git a/typesense/tests/client/documents_test.rs b/typesense/tests/client/documents_test.rs new file mode 100644 index 0000000..1f9c57e --- /dev/null +++ b/typesense/tests/client/documents_test.rs @@ -0,0 +1,205 @@ +use serde_json::json; +use typesense::models::IndexAction; +use typesense_codegen::models::{ + CollectionSchema, DeleteDocumentsParameters, ExportDocumentsParameters, Field, + ImportDocumentsParameters, SearchParameters, UpdateDocumentsParameters, +}; + +use super::{get_client, new_id}; + +#[tokio::test] +async fn test_document_lifecycle() { + let client = get_client(); + let collection_name = new_id("books"); + + // --- 1. 
Setup: Create a Collection --- + let schema = CollectionSchema { + name: collection_name.clone(), + fields: vec![ + Field { + name: "title".to_string(), + r#type: "string".to_string(), + ..Default::default() + }, + Field { + name: "author".to_string(), + r#type: "string".to_string(), + facet: Some(true), + ..Default::default() + }, + Field { + name: "publication_year".to_string(), + r#type: "int32".to_string(), + ..Default::default() + }, + ], + ..Default::default() + }; + + let create_collection_result = client.collections().create(schema).await; + assert!( + create_collection_result.is_ok(), + "Failed to create collection" + ); + + let book_1_id = &new_id("document_1"); + let book_1 = json!({ + "id": book_1_id, + "title": "The Hitchhiker's Guide to the Galaxy", + "author": "Douglas Adams", + "publication_year": 1979 + }); + + let book_2 = json!({ + "title": "The Lord of the Rings", + "author": "J.R.R. Tolkien", + "publication_year": 1954 + }); + let collection_client = client.collection(&collection_name); + let documents_client = collection_client.documents(); + + // --- 2. Create a document (via `documents().create()`) --- + let create_res = documents_client.create(book_1.clone()).await; + assert!(create_res.is_ok(), "Failed to create document 1"); + + // --- 3. Upsert a document (via `documents().upsert()`) --- + let upsert_res = documents_client.upsert(book_2.clone()).await; + assert!(upsert_res.is_ok(), "Failed to upsert document 2"); + + // --- 4. Retrieve a single document (via `document(id).retrieve()`) --- + let retrieve_res = client + .collection(&collection_name) + .document(book_1_id) + .retrieve() + .await; + assert!(retrieve_res.is_ok(), "Failed to retrieve document 1"); + assert_eq!(retrieve_res.unwrap(), book_1); + + // --- 5. Search for documents --- + let search_params = SearchParameters { + q: Some("the".to_string()), + query_by: Some("title".to_string()), + ..Default::default() + }; + let search_res = documents_client.search(search_params).await; + assert!(search_res.is_ok(), "Search failed"); + assert_eq!(search_res.unwrap().found, Some(2)); + + // --- 6. Update a single document --- + let partial_update = json!({ "publication_year": 1980 }); + let update_res = client + .collection(&collection_name) + .document(book_1_id) + .update(partial_update) + .await; + assert!(update_res.is_ok(), "Failed to update document 1"); + + // --- 7. Verify the single update --- + let retrieve_after_update_res = client + .collection(&collection_name) + .document(book_1_id) + .retrieve() + .await; + let updated_doc = retrieve_after_update_res.unwrap(); + assert_eq!( + updated_doc.get("publication_year").unwrap().as_i64(), + Some(1980) + ); + + // --- 8. Delete a single document --- + let delete_res = client + .collection(&collection_name) + .document(book_1_id) + .delete() + .await; + assert!(delete_res.is_ok(), "Failed to delete document 1"); + + // --- 9. Verify single deletion --- + let retrieve_after_delete_res = client + .collection(&collection_name) + .document(book_1_id) + .retrieve() + .await; + assert!( + retrieve_after_delete_res.is_err(), + "Document should not exist after deletion" + ); + + // --- 10. 
Bulk Import --- + let new_books_jsonl = format!( + "{}\n{}", + json!({"title": "Foundation", "author": "Isaac Asimov", "publication_year": 1951}), + json!({"title": "Dune", "author": "Frank Herbert", "publication_year": 1965}) + ); + + let import_params = ImportDocumentsParameters { + action: Some(IndexAction::Create), + ..Default::default() + }; + let import_res = documents_client + .import(new_books_jsonl, import_params) + .await; + assert!(import_res.is_ok(), "Bulk import failed"); + + // Give Typesense a moment to index + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // --- 11. Verify Import via Search --- + let search_after_import_params = SearchParameters { + q: Some("*".to_string()), + query_by: Some("title".to_string()), + ..Default::default() + }; + let search_after_import_res = documents_client.search(search_after_import_params).await; + let search_results = search_after_import_res.unwrap(); + // 1 remaining (book_2) + 2 new imports = 3 + assert_eq!(search_results.found, Some(3)); + + // --- 12. Bulk Update (via `documents().update()`) --- + let bulk_update_params = UpdateDocumentsParameters { + filter_by: Some("publication_year:<1960".to_string()), + }; + let bulk_update_payload = json!({ "author": "Sci-Fi Pioneer" }); + let bulk_update_res = documents_client + .update(bulk_update_payload, bulk_update_params) + .await; + assert!(bulk_update_res.is_ok(), "Bulk update failed"); + // Should update Lord of the Rings (1954) and Foundation (1951) + assert_eq!(bulk_update_res.unwrap().num_updated, 2); + + // Give Typesense a moment to index + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // --- 13. Export documents (via `documents().export()`) --- + let export_params = ExportDocumentsParameters { + filter_by: Some("author:\"Sci-Fi Pioneer\"".to_string()), + ..Default::default() + }; + let export_res = documents_client.export(export_params).await; + + assert!(export_res.is_ok(), "Export failed"); + let exported_jsonl = export_res.unwrap(); + + // Verify the exported content is a JSONL string with 2 lines. + let lines: Vec<&str> = exported_jsonl.trim().split('\n').collect(); + assert_eq!(lines.len(), 2, "Exported JSONL should have 2 lines"); + let exported_doc_1: serde_json::Value = serde_json::from_str(lines[0]).unwrap(); + assert_eq!(exported_doc_1["author"], "Sci-Fi Pioneer"); + + // --- 14. Bulk Delete --- + let delete_params = DeleteDocumentsParameters { + filter_by: "publication_year:>1960".to_string(), + ..Default::default() + }; + let bulk_delete_res = documents_client.delete(delete_params).await; + assert!(bulk_delete_res.is_ok(), "Bulk delete failed"); + // Only "Dune" (1965) should be deleted + assert_eq!(bulk_delete_res.unwrap().num_deleted, 1); + + // --- 15. Teardown: Delete the collection --- + let delete_collection_result = client.collection(&collection_name).delete().await; + assert!( + delete_collection_result.is_ok(), + "Failed to delete collection" + ); +} diff --git a/typesense/tests/client/keys_test.rs b/typesense/tests/client/keys_test.rs new file mode 100644 index 0000000..1e40569 --- /dev/null +++ b/typesense/tests/client/keys_test.rs @@ -0,0 +1,82 @@ +use super::get_client; +use typesense_codegen::models::ApiKeySchema; + +#[tokio::test] +async fn test_keys_lifecycle() { + let client = get_client(); + let key_description = "A test search-only key."; + + // --- 1. 
Create a new API Key (via `keys`) --- + let key_schema = ApiKeySchema { + description: key_description.to_string(), + actions: vec!["documents:search".to_string()], // Grant only search permissions + collections: vec!["*".to_string()], // For all collections + ..Default::default() + }; + + let create_result = client.keys().create(key_schema).await; + assert!(create_result.is_ok(), "Failed to create the API key."); + let created_key = create_result.unwrap(); + + // The full key value is only returned on creation + assert!( + created_key.value.is_some(), + "The full API key value should be present upon creation." + ); + assert_eq!(created_key.description, key_description.to_string()); + + let key_id = created_key.id.unwrap(); + + // --- 2. Retrieve the specific key (via `key`) --- + let retrieve_one_result = client.key(key_id).retrieve().await; + assert!( + retrieve_one_result.is_ok(), + "Failed to retrieve the specific API key." + ); + let retrieved_key = retrieve_one_result.unwrap(); + + // On retrieval, the value should be None and the prefix should be present + assert_eq!(retrieved_key.id.unwrap(), key_id); + assert!( + retrieved_key.value.is_none(), + "The retrieved key should not contain the full value." + ); + assert!( + retrieved_key.value_prefix.is_some(), + "The retrieved key should have a value prefix." + ); + + // --- 3. Retrieve all keys (via `keys`) --- + let retrieve_all_result = client.keys().retrieve().await; + assert!( + retrieve_all_result.is_ok(), + "Failed to retrieve the list of keys." + ); + let all_keys_response = retrieve_all_result.unwrap(); + + // --- 4. Find our specific key within the list --- + let our_key = all_keys_response + .keys + .iter() + .find(|k| k.id.unwrap() == (key_id)); + assert!( + our_key.is_some(), + "The newly created key was not found in the list." + ); + + // --- 5. Delete the key (via `key`) --- + let delete_result = client.key(key_id).delete().await; + assert!(delete_result.is_ok(), "Failed to delete the API key."); + let delete_response = delete_result.unwrap(); + assert_eq!( + delete_response.id, key_id, + "The response from delete should contain the correct key ID." + ); + + // --- 6. Verify Deletion --- + let get_after_delete_result = client.key(key_id).retrieve().await; + assert!( + get_after_delete_result.is_err(), + "API key should not exist after deletion." + ); +} diff --git a/typesense/tests/client/mod.rs b/typesense/tests/client/mod.rs index cf8192b..0e77bda 100644 --- a/typesense/tests/client/mod.rs +++ b/typesense/tests/client/mod.rs @@ -1,289 +1,50 @@ +pub mod aliases_test; +pub mod analytics_test; +pub mod client_test; +pub mod collections_test; +pub mod conversation_models_test; +pub mod documents_test; +pub mod keys_test; +pub mod multi_search_test; +pub mod presets_test; +pub mod search_overrides_test; +pub mod stemming_dictionaries_test; +pub mod stopwords_test; +pub mod synonyms_test; + use reqwest::Url; use reqwest_retry::policies::ExponentialBackoff; use std::time::Duration; -use typesense::client::*; -use typesense::models::CollectionResponse; -use wiremock::matchers::{header, method, path}; -use wiremock::{Mock, MockServer, ResponseTemplate}; - -// Helper to create a mock Typesense server for a successful collection retrieval. 
-async fn setup_mock_server_ok(server: &MockServer, collection_name: &str) {
-    let response_body = CollectionResponse {
-        name: collection_name.to_string(),
-        ..Default::default()
-    };
-
-    Mock::given(method("GET"))
-        .and(path(format!("/collections/{}", collection_name)))
-        .and(header("X-TYPESENSE-API-KEY", "test-key"))
-        .respond_with(ResponseTemplate::new(200).set_body_json(response_body))
-        .mount(server)
-        .await;
-}
-
-// Helper to create a mock Typesense server that returns a server error.
-async fn setup_mock_server_503(server: &MockServer, collection_name: &str) {
-    Mock::given(method("GET"))
-        .and(path(format!("/collections/{}", collection_name)))
-        .respond_with(ResponseTemplate::new(503))
-        .mount(server)
-        .await;
-}
-
-// Helper to create a mock Typesense server that returns a 404 Not Found error.
-async fn setup_mock_server_404(server: &MockServer, collection_name: &str) {
-    Mock::given(method("GET"))
-        .and(path(format!("/collections/{}", collection_name)))
-        .respond_with(ResponseTemplate::new(404))
-        .mount(server)
-        .await;
-}
-
-// Helper function to create a client configuration for tests.
-fn get_test_config(nodes: Vec<Url>, nearest_node: Option<Url>) -> MultiNodeConfiguration {
-    MultiNodeConfiguration {
-        nodes,
-        nearest_node,
-        api_key: "test-key".to_string(),
+use std::time::{SystemTime, UNIX_EPOCH};
+use typesense::client::{Client, MultiNodeConfiguration};
+
+/// Helper function to create a new client for all tests in this suite.
+pub fn get_client() -> Client {
+    let config = MultiNodeConfiguration {
+        nodes: vec![Url::parse("http://localhost:8108").unwrap()],
+        nearest_node: None,
+        api_key: "xyz".to_string(),
         healthcheck_interval: Duration::from_secs(60),
-        retry_policy: ExponentialBackoff::builder().build_with_max_retries(0),
-        connection_timeout: Duration::from_secs(1),
-    }
-}
-
-#[tokio::test]
-async fn test_success_on_first_node() {
-    let server1 = MockServer::start().await;
-    setup_mock_server_ok(&server1, "products").await;
-
-    let config = get_test_config(vec![Url::parse(&server1.uri()).unwrap()], None);
-    let client = Client::new(config).unwrap();
-
-    let result = client.collection("products").retrieve().await;
-
-    assert!(result.is_ok());
-    assert_eq!(result.unwrap().name, "products");
-    // Check that the server received exactly one request.
-    assert_eq!(server1.received_requests().await.unwrap().len(), 1);
-}
-
-#[tokio::test]
-async fn test_failover_to_second_node() {
-    let server1 = MockServer::start().await;
-    let server2 = MockServer::start().await;
-    setup_mock_server_503(&server1, "products").await;
-    setup_mock_server_ok(&server2, "products").await;
-
-    let config = get_test_config(
-        vec![
-            Url::parse(&server1.uri()).unwrap(),
-            Url::parse(&server2.uri()).unwrap(),
-        ],
-        None,
-    );
-    let client = Client::new(config).unwrap();
-
-    let result = client.collection("products").retrieve().await;
-    assert!(result.is_ok());
-
-    // The first server should have been tried and failed.
-    assert_eq!(server1.received_requests().await.unwrap().len(), 1);
-    // The second server should have been tried and succeeded.
-    assert_eq!(server2.received_requests().await.unwrap().len(), 1);
-}
-
-#[tokio::test]
-async fn test_nearest_node_is_prioritized() {
-    let nearest_server = MockServer::start().await;
-    let regular_server = MockServer::start().await;
-    setup_mock_server_ok(&nearest_server, "products").await;
-    setup_mock_server_ok(&regular_server, "products").await;
-
-    let config = get_test_config(
-        vec![Url::parse(&regular_server.uri()).unwrap()],
-        Some(Url::parse(&nearest_server.uri()).unwrap()),
-    );
-    let client = Client::new(config).unwrap();
-
-    let result = client.collection("products").retrieve().await;
-    assert!(result.is_ok());
-
-    // Only the nearest node should have received a request.
-    assert_eq!(nearest_server.received_requests().await.unwrap().len(), 1);
-    assert_eq!(regular_server.received_requests().await.unwrap().len(), 0);
-}
-
-#[tokio::test]
-async fn test_failover_from_nearest_to_regular_node() {
-    let nearest_server = MockServer::start().await;
-    let regular_server = MockServer::start().await;
-    setup_mock_server_503(&nearest_server, "products").await;
-    setup_mock_server_ok(&regular_server, "products").await;
-
-    let config = get_test_config(
-        vec![Url::parse(&regular_server.uri()).unwrap()],
-        Some(Url::parse(&nearest_server.uri()).unwrap()),
-    );
-    let client = Client::new(config).unwrap();
-
-    let result = client.collection("products").retrieve().await;
-    assert!(result.is_ok());
-
-    // Nearest node should have failed.
-    assert_eq!(nearest_server.received_requests().await.unwrap().len(), 1);
-    // Regular node should have succeeded.
-    assert_eq!(regular_server.received_requests().await.unwrap().len(), 1);
-}
-
-#[tokio::test]
-async fn test_round_robin_failover() {
-    let server1 = MockServer::start().await;
-    let server2 = MockServer::start().await;
-    let server3 = MockServer::start().await;
-    setup_mock_server_503(&server1, "products").await;
-    setup_mock_server_503(&server2, "products").await;
-    setup_mock_server_ok(&server3, "products").await;
-
-    let config = get_test_config(
-        vec![
-            Url::parse(&server1.uri()).unwrap(),
-            Url::parse(&server2.uri()).unwrap(),
-            Url::parse(&server3.uri()).unwrap(),
-        ],
-        None,
-    );
-    let client = Client::new(config).unwrap();
-
-    // First request should fail over to the third node
-    let result = client.collection("products").retrieve().await;
-    assert!(result.is_ok());
-    assert_eq!(server1.received_requests().await.unwrap().len(), 1);
-    assert_eq!(server2.received_requests().await.unwrap().len(), 1);
-    assert_eq!(server3.received_requests().await.unwrap().len(), 1);
-
-    // The next request should start from the now-healthy 3rd node, but round-robin
-    // logic will have advanced the internal counter. Let's see it wrap around.
-    // We expect the next attempt to be on server 3 again, then 1 (if 3 fails).
-
-    // Reset server 3 to also fail
-    server3.reset().await;
-    setup_mock_server_503(&server3, "products").await;
-    // Make server 1 healthy again
-    server1.reset().await;
-    setup_mock_server_ok(&server1, "products").await;
-
-    let result2 = client.collection("products").retrieve().await;
-    assert!(result2.is_ok());
-
-    // Server 3 was tried first and failed.
-    assert_eq!(server3.received_requests().await.unwrap().len(), 1);
-    // Server 1 was tried next and succeeded.
-    assert_eq!(server1.received_requests().await.unwrap().len(), 1);
-    // Server 2 was not touched this time.
- assert_eq!(server2.received_requests().await.unwrap().len(), 1); // Remains 1 from first call -} - -#[tokio::test] -async fn test_health_check_and_node_recovery() { - let server1 = MockServer::start().await; - let server2 = MockServer::start().await; - - setup_mock_server_503(&server1, "products").await; - setup_mock_server_ok(&server2, "products").await; - - let mut config = get_test_config( - vec![ - Url::parse(&server1.uri()).unwrap(), - Url::parse(&server2.uri()).unwrap(), - ], - None, - ); - // Use a very short healthcheck interval for the test - config.healthcheck_interval = Duration::from_millis(500); - let client = Client::new(config).unwrap(); - - // 1. First request fails over to server2, marking server1 as unhealthy. - assert!(client.collection("products").retrieve().await.is_ok()); - assert_eq!(server1.received_requests().await.unwrap().len(), 1); - assert_eq!(server2.received_requests().await.unwrap().len(), 1); - - // 2. Immediate second request should go directly to server2. - assert!(client.collection("products").retrieve().await.is_ok()); - assert_eq!(server1.received_requests().await.unwrap().len(), 1); // No new request - assert_eq!(server2.received_requests().await.unwrap().len(), 2); // Got another request - - // 3. Wait for the healthcheck interval to pass. - tokio::time::sleep(Duration::from_millis(600)).await; - - // 4. Make server1 healthy again. - server1.reset().await; - setup_mock_server_ok(&server1, "products").await; - - // 5. The next request should try server1 again (due to healthcheck expiry) and succeed. - assert!(client.collection("products").retrieve().await.is_ok()); - assert_eq!(server1.received_requests().await.unwrap().len(), 1); // Server 1 received its first successful req - assert_eq!(server2.received_requests().await.unwrap().len(), 2); // No new request for server 2 -} - -#[tokio::test] -async fn test_all_nodes_fail() { - let server1 = MockServer::start().await; - let server2 = MockServer::start().await; - setup_mock_server_503(&server1, "products").await; - setup_mock_server_503(&server2, "products").await; - - let config = get_test_config( - vec![ - Url::parse(&server1.uri()).unwrap(), - Url::parse(&server2.uri()).unwrap(), - ], - None, - ); - let client = Client::new(config).unwrap(); - - let result = client.collection("products").retrieve().await; - assert!(result.is_err()); - - match result.err().unwrap() { - Error::AllNodesFailed(_) => { /* This is the expected outcome */ } - _ => panic!("Expected AllNodesFailed error"), - } - - // Both servers should have been tried. - assert_eq!(server1.received_requests().await.unwrap().len(), 1); - assert_eq!(server2.received_requests().await.unwrap().len(), 1); + retry_policy: ExponentialBackoff::builder().build_with_max_retries(3), + connection_timeout: Duration::from_secs(10), + }; + Client::new(config).unwrap() } -#[tokio::test] -async fn test_fail_fast_on_non_retriable_error() { - let server1 = MockServer::start().await; - let server2 = MockServer::start().await; - - setup_mock_server_404(&server1, "products").await; - setup_mock_server_ok(&server2, "products").await; - - let config = get_test_config( - vec![ - Url::parse(&server1.uri()).unwrap(), - Url::parse(&server2.uri()).unwrap(), - ], - None, - ); - let client = Client::new(config).unwrap(); - - let result = client.collection("products").retrieve().await; - assert!(result.is_err()); +/// Generates a unique name for a test resource by combining a prefix, +/// a nanoid, and an optional suffix. 
+/// e.g., "test_collection_aB1cD2eF_create"
+pub fn new_id(suffix: &str) -> String {
+    // Using nanoid for a short, URL-friendly, and collision-resistant random ID.
+    // The default length of 21 is more than enough. We use 8 for conciseness.
+    let random_part = nanoid::nanoid!(8); // e.g., "fX3a-b_1"
-    // Check that the error is the non-retriable API error.
-    match result.err().unwrap() {
-        Error::Api(typesense_codegen::apis::Error::ResponseError(content)) => {
-            assert_eq!(content.status, reqwest::StatusCode::NOT_FOUND);
-        }
-        e => panic!("Expected an API error, but got {:?}", e),
-    }
+    // The timestamp helps ensure IDs are unique even across test runs that happen close together,
+    // although nanoid is likely sufficient on its own.
+    let timestamp = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap()
+        .as_millis();
-    // The first server should have been tried.
-    assert_eq!(server1.received_requests().await.unwrap().len(), 1);
-    // The second server should NOT have been tried.
-    assert_eq!(server2.received_requests().await.unwrap().len(), 0);
+    format!("test_{}_{}_{}", suffix, timestamp, random_part)
 }
diff --git a/typesense/tests/client/multi_search_test.rs b/typesense/tests/client/multi_search_test.rs
new file mode 100644
index 0000000..ce5e2b1
--- /dev/null
+++ b/typesense/tests/client/multi_search_test.rs
@@ -0,0 +1,248 @@
+use typesense_codegen::models::{
+    CollectionSchema, Field, ImportDocumentsParameters, MultiSearchCollectionParameters,
+    MultiSearchParameters, MultiSearchSearchesParameter,
+};
+
+use super::{get_client, new_id};
+
+async fn setup_multi_search_tests(
+    client: &typesense::client::Client,
+    products_collection_name: &str,
+    brands_collection_name: &str,
+) {
+    // --- Create collections ---
+    let products_schema = CollectionSchema {
+        name: products_collection_name.to_string(),
+        fields: vec![
+            Field::new("name".to_string(), "string".to_string()),
+            Field::new("price".to_string(), "int32".to_string()),
+        ],
+        ..Default::default()
+    };
+    client.collections().create(products_schema).await.unwrap();
+
+    let brands_schema = CollectionSchema {
+        name: brands_collection_name.to_string(),
+        fields: vec![
+            Field::new("company_name".to_string(), "string".to_string()),
+            Field::new("country".to_string(), "string".to_string()),
+        ],
+        ..Default::default()
+    };
+    client.collections().create(brands_schema).await.unwrap();
+
+    // --- Index documents ---
+    let product_docs = r#"
+        {"id": "p1", "name": "iPhone 15", "price": 999}
+        {"id": "p2", "name": "MacBook Pro", "price": 1999}
+    "#
+    .trim()
+    .lines()
+    .map(|s| s.trim())
+    .collect::<Vec<&str>>()
+    .join("\n");
+
+    client
+        .collection(products_collection_name)
+        .documents()
+        .import(
+            product_docs,
+            ImportDocumentsParameters {
+                action: Some(typesense::models::IndexAction::Create),
+                ..Default::default()
+            },
+        )
+        .await
+        .unwrap();
+
+    let brand_docs = r#"
+        {"id": "b1", "company_name": "Apple Inc.", "country": "USA"}
+        {"id": "b2", "company_name": "Samsung", "country": "South Korea"}
+    "#
+    .trim()
+    .lines()
+    .map(|s| s.trim())
+    .collect::<Vec<&str>>()
+    .join("\n");
+
+    client
+        .collection(brands_collection_name)
+        .documents()
+        .import(
+            brand_docs,
+            ImportDocumentsParameters {
+                action: Some(typesense::models::IndexAction::Create),
+                ..Default::default()
+            },
+        )
+        .await
+        .unwrap();
+}
+
+#[tokio::test]
+async fn test_multi_search_federated() {
+    let client = get_client();
+    let products_collection_name = new_id("products");
+    let brands_collection_name = new_id("brands");
+    setup_multi_search_tests(&client,
&products_collection_name, &brands_collection_name).await; + + let search_requests = MultiSearchSearchesParameter { + union: Some(false), + searches: vec![ + MultiSearchCollectionParameters { + q: Some("pro".into()), + query_by: Some("name".into()), + collection: Some(products_collection_name.clone()), + ..Default::default() + }, + MultiSearchCollectionParameters { + q: Some("USA".into()), + query_by: Some("country".into()), + collection: Some(brands_collection_name.clone()), + ..Default::default() + }, + ], + }; + + let common_params = MultiSearchParameters::default(); + + let result = client + .multi_search() + .perform(search_requests, common_params) + .await; + + assert!(result.is_ok(), "Multi-search request failed"); + let response = result.unwrap(); + + assert_eq!( + response.results.len(), + 2, + "Expected 2 sets of search results" + ); + + // --- Assert products result --- + let products_result = &response.results[0]; + assert!( + products_result.error.is_none(), + "First search returned an error" + ); + assert_eq!(products_result.found, Some(1)); + let product_hit = &products_result.hits.as_ref().unwrap()[0]; + let product_doc = product_hit.document.as_ref().unwrap().as_object().unwrap(); + assert_eq!( + product_doc.get("name").unwrap().as_str(), + Some("MacBook Pro") + ); + + // --- Assert brands result --- + let brands_result = &response.results[1]; + assert!( + brands_result.error.is_none(), + "Second search returned an error" + ); + assert_eq!(brands_result.found, Some(1)); + let brand_hit = &brands_result.hits.as_ref().unwrap()[0]; + let brand_doc = brand_hit.document.as_ref().unwrap().as_object().unwrap(); + assert_eq!( + brand_doc.get("company_name").unwrap().as_str(), + Some("Apple Inc.") + ); + + // --- Cleanup --- + client + .collection(&products_collection_name) + .delete() + .await + .unwrap(); + client + .collection(&brands_collection_name) + .delete() + .await + .unwrap(); +} + +#[tokio::test] +async fn test_multi_search_with_common_params() { + let client = get_client(); + let products_collection_name = new_id("products_common"); + let brands_collection_name = new_id("brands_common"); + setup_multi_search_tests(&client, &products_collection_name, &brands_collection_name).await; + + // Define individual searches, each with the correct `query_by` for its schema. + let search_requests = MultiSearchSearchesParameter { + union: Some(false), + searches: vec![ + MultiSearchCollectionParameters { + collection: Some(products_collection_name.clone()), + q: Some("pro".into()), // This should find "Macbook Pro" + query_by: Some("name".into()), // Specific to the products schema + ..Default::default() + }, + MultiSearchCollectionParameters { + collection: Some(brands_collection_name.clone()), + q: Some("inc".into()), // This should find "Apple Inc." 
+ query_by: Some("company_name".into()), // Specific to the brands schema + ..Default::default() + }, + ], + }; + + let common_params = MultiSearchParameters { + limit: Some(1), + ..Default::default() + }; + + let result = client + .multi_search() + .perform(search_requests, common_params) + .await; + + assert!( + result.is_ok(), + "Multi-search request failed: {:?}", + result.err() + ); + let response = result.unwrap(); + + assert_eq!(response.results.len(), 2); + + // --- Assert products result --- + let products_result = &response.results[0]; + assert!( + products_result.error.is_none(), + "Products search returned an error: {:?}", + products_result.error + ); + assert_eq!(products_result.found, Some(1)); + let product_hit = &products_result.hits.as_ref().unwrap()[0]; + assert_eq!( + product_hit.document.as_ref().unwrap()["name"], + "MacBook Pro" + ); + + // --- Assert brands result --- + let brands_result = &response.results[1]; + assert!( + brands_result.error.is_none(), + "Brands search returned an error: {:?}", + brands_result.error + ); + assert_eq!(brands_result.found, Some(1)); + let brand_hit = &brands_result.hits.as_ref().unwrap()[0]; + assert_eq!( + brand_hit.document.as_ref().unwrap()["company_name"], + "Apple Inc." + ); + + // --- Cleanup --- + client + .collection(&products_collection_name) + .delete() + .await + .unwrap(); + client + .collection(&brands_collection_name) + .delete() + .await + .unwrap(); +} diff --git a/typesense/tests/client/presets_test.rs b/typesense/tests/client/presets_test.rs new file mode 100644 index 0000000..39bc67f --- /dev/null +++ b/typesense/tests/client/presets_test.rs @@ -0,0 +1,86 @@ +use typesense_codegen::models::{ + PresetSchema, PresetUpsertSchema, PresetUpsertSchemaValue, SearchParameters, +}; + +use super::{get_client, new_id}; + +#[tokio::test] +async fn test_presets_lifecycle() { + let client = get_client(); + let preset_id = new_id("search-preset"); + + // --- 1. Define the Preset's value using the strong types --- + // This will be the expected value in the response as well. + let search_params = SearchParameters { + query_by: Some("title,authors".to_string()), + sort_by: Some("_text_match:desc,publication_year:desc".to_string()), + ..Default::default() + }; + let expected_preset_value = PresetUpsertSchemaValue::SearchParameters(Box::new(search_params)); + + // This is the schema to be sent in the request body. + let upsert_schema = PresetUpsertSchema { + value: Box::new(expected_preset_value.clone()), + }; + + // --- 2. Create (Upsert) a Preset (via `presets`) --- + let upsert_result = client.presets().upsert(&preset_id, upsert_schema).await; + assert!( + upsert_result.is_ok(), + "Failed to create preset: {:?}", + upsert_result.err() + ); + + // The API returns a full PresetSchema object. + let created_preset: PresetSchema = upsert_result.unwrap(); + assert_eq!(created_preset.name, preset_id); + // Compare the strongly-typed value field directly. + assert_eq!(*created_preset.value, expected_preset_value); + + // --- 3. Retrieve the specific preset (via `preset`) --- + let retrieve_one_result = client.preset(&preset_id).retrieve().await; + assert!( + retrieve_one_result.is_ok(), + "Failed to retrieve the specific preset." + ); + let retrieved_preset: PresetSchema = retrieve_one_result.unwrap(); + assert_eq!(retrieved_preset.name, preset_id); + assert_eq!(*retrieved_preset.value, expected_preset_value); + + // --- 4. 
Retrieve all presets (via `presets`) --- + let retrieve_all_result = client.presets().retrieve().await; + assert!( + retrieve_all_result.is_ok(), + "Failed to retrieve all presets." + ); + let all_presets_response = retrieve_all_result.unwrap(); + + // --- 5. Find our preset in the list --- + let our_preset = all_presets_response + .presets + .iter() + .find(|p| p.name == preset_id); + + assert!( + our_preset.is_some(), + "The created preset was not found in the list." + ); + + if let Some(preset) = our_preset { + assert_eq!(preset.name, preset_id); + assert_eq!(*preset.value, expected_preset_value); + } + + // --- 6. Delete the preset (via `preset`) --- + let delete_result = client.preset(&preset_id).delete().await; + assert!(delete_result.is_ok(), "Failed to delete preset."); + let deleted_preset = delete_result.unwrap(); + assert_eq!(deleted_preset.name, preset_id); + + // --- 7. Verify Deletion --- + let get_after_delete_result = client.preset(&preset_id).retrieve().await; + assert!( + get_after_delete_result.is_err(), + "Preset should not exist after deletion." + ); +} diff --git a/typesense/tests/client/search_overrides_test.rs b/typesense/tests/client/search_overrides_test.rs new file mode 100644 index 0000000..e67376f --- /dev/null +++ b/typesense/tests/client/search_overrides_test.rs @@ -0,0 +1,120 @@ +use typesense_codegen::models::{ + CollectionSchema, Field, SearchOverrideInclude, SearchOverrideRule, SearchOverrideSchema, +}; + +use super::{get_client, new_id}; + +#[tokio::test] +async fn test_search_overrides_lifecycle() { + let client = get_client(); + let collection_name = new_id("products"); + let override_id = new_id("promo_products"); + + // --- 1. Setup: Create a collection and add some documents --- + let schema = CollectionSchema { + name: collection_name.clone(), + fields: vec![ + Field { + name: "name".to_string(), + r#type: "string".to_string(), + ..Default::default() + }, + Field { + name: "category".to_string(), + r#type: "string".to_string(), + facet: Some(true), + ..Default::default() + }, + ], + ..Default::default() + }; + client.collections().create(schema).await.unwrap(); + + // --- 2. Create (Upsert) a Search Override (via `search_overrides`) --- + let override_schema = SearchOverrideSchema { + rule: Box::new(SearchOverrideRule { + query: Some("products".to_string()), + r#match: Some(typesense::models::search_override_rule::Match::Exact), + ..Default::default() + }), + includes: Some(vec![SearchOverrideInclude { + id: "3".to_string(), + position: 1, + }]), + ..Default::default() + }; + + let upsert_result = client + .collection(&collection_name) + .search_overrides() + .upsert(&override_id, override_schema) + .await; + + assert!(upsert_result.is_ok(), "Failed to create search override"); + let created_override = upsert_result.unwrap(); + assert_eq!(created_override.id, override_id); + assert_eq!(created_override.rule.query.unwrap(), "products"); + + // --- 3. Retrieve the specific override (via `search_override`) --- + let retrieve_one_result = client + .collection(&collection_name) + .search_override(&override_id) + .retrieve() + .await; + + assert!( + retrieve_one_result.is_ok(), + "Failed to retrieve the specific search override." + ); + let retrieved_override = retrieve_one_result.unwrap(); + assert_eq!(retrieved_override.id, override_id); + assert_eq!(retrieved_override.includes.unwrap()[0].id, "3"); + + // --- 4. 
List all overrides (via `search_overrides`) --- + let list_result = client + .collection(&collection_name) + .search_overrides() + .list() + .await; + + assert!(list_result.is_ok(), "Failed to list search overrides."); + let list_response = list_result.unwrap(); + assert_eq!(list_response.overrides.len(), 1); + assert!( + list_response + .overrides + .iter() + .find(|o| o.id == override_id) + .is_some(), + "The newly created override was not found in the list." + ); + + // --- 5. Delete the override (via `search_override`) --- + let delete_result = client + .collection(&collection_name) + .search_override(&override_id) + .delete() + .await; + + assert!(delete_result.is_ok(), "Failed to delete search override."); + let delete_response = delete_result.unwrap(); + assert_eq!(delete_response.id, override_id); + + // --- 6. Verify Deletion --- + let get_after_delete_result = client + .collection(&collection_name) + .search_override(&override_id) + .retrieve() + .await; + assert!( + get_after_delete_result.is_err(), + "Search override should not exist after deletion." + ); + + // --- 7. Teardown: Delete the collection --- + let delete_collection_result = client.collection(&collection_name).delete().await; + assert!( + delete_collection_result.is_ok(), + "Failed to delete collection after test." + ); +} diff --git a/typesense/tests/client/stemming_dictionaries_test.rs b/typesense/tests/client/stemming_dictionaries_test.rs new file mode 100644 index 0000000..60b0d91 --- /dev/null +++ b/typesense/tests/client/stemming_dictionaries_test.rs @@ -0,0 +1,73 @@ +use crate::{get_client, new_id}; + +#[tokio::test] +async fn test_stemming_dictionary_import_and_retrieve() { + let client = get_client(); + let dictionary_id = new_id("verb_stems_v2"); + + // --- 1. Define and Import the Dictionary --- + // The JSONL payload uses "word" and "root" keys. + let dictionary_data = r#"{"word": "running", "root": "run"} +{"word": "flies", "root": "fly"}"# + .to_string(); + let import_result = client + .stemming() + .dictionaries() + .import(&dictionary_id, dictionary_data) + .await; + assert!( + import_result.is_ok(), + "Failed to import stemming dictionary. Error: {:?}", + import_result.err() + ); + + // --- 2. Retrieve the specific dictionary by its ID to verify contents --- + // This is necessary because the list operation only returns IDs. + let get_result = client + .stemming() + .dictionary(&dictionary_id) + .retrieve() + .await; + assert!( + get_result.is_ok(), + "Failed to retrieve the specific stemming dictionary. Error: {:?}", + get_result.err() + ); + + let dictionary = get_result.unwrap(); + assert_eq!(dictionary.id, dictionary_id); + assert_eq!( + dictionary.words.len(), + 2, + "The number of words in the retrieved dictionary is incorrect." + ); + assert!( + dictionary + .words + .iter() + .any(|w| w.word == "running" && w.root == "run"), + "The mapping for 'running' -> 'run' was not found." + ); + + // --- 3. Retrieve all dictionary IDs and find ours --- + let list_result = client.stemming().dictionaries().retrieve().await; + assert!( + list_result.is_ok(), + "Failed to retrieve the list of stemming dictionaries. Error: {:?}", + list_result.err() + ); + + let list_response = list_result.unwrap(); + let dictionary_ids = list_response.dictionaries; + + assert!( + dictionary_ids.is_some(), + "The list of dictionary IDs should not be None." 
+ ); + + let ids_vec = dictionary_ids.unwrap(); + assert!( + ids_vec.iter().any(|id| id == &dictionary_id), + "The newly imported dictionary's ID was not found in the master list." + ); +} diff --git a/typesense/tests/client/stopwords_test.rs b/typesense/tests/client/stopwords_test.rs new file mode 100644 index 0000000..f4d5580 --- /dev/null +++ b/typesense/tests/client/stopwords_test.rs @@ -0,0 +1,59 @@ +use typesense_codegen::models::StopwordsSetUpsertSchema; + +use super::{get_client, new_id}; + +#[tokio::test] +async fn test_stopwords_and_stopword_lifecycle() { + let client = get_client(); + let set_id = new_id("custom_stopwords"); + + // --- 1. Upsert a Stopwords Set (via `stopwords`) --- + let schema = StopwordsSetUpsertSchema { + stopwords: vec!["a".to_string(), "the".to_string(), "an".to_string()], + ..Default::default() + }; + + let upsert_result = client.stopwords().upsert(&set_id, schema).await; + assert!(upsert_result.is_ok(), "Failed to upsert stopwords set"); + let upserted_set = upsert_result.unwrap(); + assert_eq!(upserted_set.id, set_id); + assert_eq!(upserted_set.stopwords, vec!["a", "the", "an"]); + + // --- 2. Retrieve the specific Stopword set (via `stopword`) --- + let retrieve_one_result = client.stopword(&set_id).retrieve().await; + assert!( + retrieve_one_result.is_ok(), + "Failed to retrieve the newly created stopwords set." + ); + let retrieved_set = retrieve_one_result.unwrap(); + assert_eq!(retrieved_set.stopwords.id, set_id); + assert_eq!(retrieved_set.stopwords.stopwords, vec!["a", "the", "an"]); + + // --- 3. Retrieve all stopwords sets (via `stopwords`) --- + let retrieve_all_result = client.stopwords().retrieve().await; + assert!( + retrieve_all_result.is_ok(), + "Failed to retrieve the list of stopwords sets." + ); + let all_sets = retrieve_all_result.unwrap(); + + // --- 4. Find our specific set within the list --- + let our_set = all_sets.stopwords.iter().find(|s| s.id == set_id); + assert!( + our_set.is_some(), + "The newly created stopwords set was not found in the list." + ); + + // --- 5. Delete the Stopword set (via `stopword`) --- + let delete_result = client.stopword(&set_id).delete().await; + assert!(delete_result.is_ok(), "Failed to delete stopwords set"); + let deleted_response = delete_result.unwrap(); + assert_eq!(deleted_response.id, set_id); + + // --- 6. Verify Deletion --- + let get_after_delete_result = client.stopword(&set_id).retrieve().await; + assert!( + get_after_delete_result.is_err(), + "Stopwords set should not exist after deletion" + ); +} diff --git a/typesense/tests/client/synonyms_test.rs b/typesense/tests/client/synonyms_test.rs new file mode 100644 index 0000000..a6141d5 --- /dev/null +++ b/typesense/tests/client/synonyms_test.rs @@ -0,0 +1,112 @@ +use typesense_codegen::models::{CollectionSchema, Field, SearchSynonymSchema}; + +use super::{get_client, new_id}; + +#[tokio::test] +async fn test_synonyms_lifecycle() { + let client = get_client(); + let collection_name = new_id("products"); + let synonym_id = new_id("synonym-123"); + + // --- 1. Create a collection to house the synonyms --- + let collection_schema = CollectionSchema { + name: collection_name.clone(), + fields: vec![Field { + name: "name".to_string(), + r#type: "string".to_string(), + ..Default::default() + }], + ..Default::default() + }; + let create_collection_result = client.collections().create(collection_schema).await; + assert!( + create_collection_result.is_ok(), + "Failed to create collection for synonym test" + ); + + // --- 2. 
Create (Upsert) a Synonym (via `synonyms`) --- + let synonym_schema = SearchSynonymSchema { + synonyms: vec![ + "blazer".to_string(), + "jacket".to_string(), + "coat".to_string(), + ], + ..Default::default() + }; + + let upsert_result = client + .collection(&collection_name) + .synonyms() + .upsert(&synonym_id, synonym_schema) + .await; + + assert!(upsert_result.is_ok(), "Failed to create synonym"); + let created_synonym = upsert_result.unwrap(); + assert_eq!(created_synonym.id, synonym_id); + + // --- 3. Retrieve the specific synonym (via `synonym`) --- + let retrieve_one_result = client + .collection(&collection_name) + .synonym(&synonym_id) + .get() + .await; + + assert!( + retrieve_one_result.is_ok(), + "Failed to retrieve the specific synonym." + ); + let retrieved_synonym = retrieve_one_result.unwrap(); + assert_eq!(retrieved_synonym.id, synonym_id); + assert_eq!(retrieved_synonym.synonyms.len(), 3); + + // --- 4. Retrieve all synonyms for the collection (via `synonyms`) --- + let retrieve_all_result = client + .collection(&collection_name) + .synonyms() + .retrieve() + .await; + assert!( + retrieve_all_result.is_ok(), + "Failed to retrieve the list of synonyms." + ); + let all_synonyms_response = retrieve_all_result.unwrap(); + + // --- 5. Find our specific synonym within the list --- + let our_synonym = all_synonyms_response + .synonyms + .iter() + .find(|s| s.id == synonym_id); + + assert!( + our_synonym.is_some(), + "The newly created synonym was not found in the list." + ); + + // --- 6. Delete the synonym (via `synonym`) --- + let delete_result = client + .collection(&collection_name) + .synonym(&synonym_id) + .delete() + .await; + assert!(delete_result.is_ok(), "Failed to delete synonym"); + let delete_response = delete_result.unwrap(); + assert_eq!(delete_response.id, synonym_id); + + // --- 7. Verify Deletion --- + let get_after_delete_result = client + .collection(&collection_name) + .synonym(&synonym_id) + .get() + .await; + assert!( + get_after_delete_result.is_err(), + "Synonym should not exist after deletion" + ); + + // --- 8. Clean up the collection --- + let delete_collection_result = client.collection(&collection_name).delete().await; + assert!( + delete_collection_result.is_ok(), + "Failed to delete collection after synonym test" + ); +} diff --git a/typesense_codegen/src/apis/documents_api.rs b/typesense_codegen/src/apis/documents_api.rs index b6ebf75..e5335ff 100644 --- a/typesense_codegen/src/apis/documents_api.rs +++ b/typesense_codegen/src/apis/documents_api.rs @@ -4,15 +4,14 @@ * An open source search engine for building delightful search experiences. 
* * The version of the OpenAPI document: 28.0 - * + * * Generated by: https://openapi-generator.tech */ - -use reqwest; -use serde::{Deserialize, Serialize, de::Error as _}; +use super::{configuration, ContentType, Error}; use crate::{apis::ResponseContent, models}; -use super::{Error, configuration, ContentType}; +use reqwest; +use serde::{de::Error as _, Deserialize, Serialize}; /// struct for passing parameters to the method [`delete_document`] #[derive(Clone, Debug)] @@ -20,7 +19,7 @@ pub struct DeleteDocumentParams { /// The name of the collection to search for the document under pub collection_name: String, /// The Document ID - pub document_id: String + pub document_id: String, } /// struct for passing parameters to the method [`delete_documents`] @@ -31,7 +30,7 @@ pub struct DeleteDocumentsParams { pub filter_by: Option, pub batch_size: Option, pub ignore_not_found: Option, - pub truncate: Option + pub truncate: Option, } /// struct for passing parameters to the method [`delete_search_override`] @@ -40,7 +39,7 @@ pub struct DeleteSearchOverrideParams { /// The name of the collection pub collection_name: String, /// The ID of the search override to delete - pub override_id: String + pub override_id: String, } /// struct for passing parameters to the method [`export_documents`] @@ -50,7 +49,7 @@ pub struct ExportDocumentsParams { pub collection_name: String, pub filter_by: Option, pub include_fields: Option, - pub exclude_fields: Option + pub exclude_fields: Option, } /// struct for passing parameters to the method [`get_document`] @@ -59,7 +58,7 @@ pub struct GetDocumentParams { /// The name of the collection to search for the document under pub collection_name: String, /// The Document ID - pub document_id: String + pub document_id: String, } /// struct for passing parameters to the method [`get_search_override`] @@ -68,14 +67,14 @@ pub struct GetSearchOverrideParams { /// The name of the collection pub collection_name: String, /// The id of the search override - pub override_id: String + pub override_id: String, } /// struct for passing parameters to the method [`get_search_overrides`] #[derive(Clone, Debug)] pub struct GetSearchOverridesParams { /// The name of the collection - pub collection_name: String + pub collection_name: String, } /// struct for passing parameters to the method [`import_documents`] @@ -90,7 +89,7 @@ pub struct ImportDocumentsParams { pub remote_embedding_batch_size: Option, pub return_doc: Option, pub action: Option, - pub dirty_values: Option + pub dirty_values: Option, } /// struct for passing parameters to the method [`index_document`] @@ -103,7 +102,7 @@ pub struct IndexDocumentParams { /// Additional action to perform pub action: Option, /// Dealing with Dirty Data - pub dirty_values: Option + pub dirty_values: Option, } /// struct for passing parameters to the method [`multi_search`] @@ -172,7 +171,7 @@ pub struct MultiSearchParams { pub conversation: Option, pub conversation_model_id: Option, pub conversation_id: Option, - pub multi_search_searches_parameter: Option + pub multi_search_searches_parameter: Option, } /// struct for passing parameters to the method [`search_collection`] @@ -248,7 +247,7 @@ pub struct SearchCollectionParams { pub voice_query: Option, pub conversation: Option, pub conversation_model_id: Option, - pub conversation_id: Option + pub conversation_id: Option, } /// struct for passing parameters to the method [`update_document`] @@ -261,7 +260,7 @@ pub struct UpdateDocumentParams { /// The document object with fields to be 
updated pub body: serde_json::Value, /// Dealing with Dirty Data - pub dirty_values: Option + pub dirty_values: Option, } /// struct for passing parameters to the method [`update_documents`] @@ -271,7 +270,7 @@ pub struct UpdateDocumentsParams { pub collection_name: String, /// The document fields to be updated pub body: serde_json::Value, - pub filter_by: Option + pub filter_by: Option, } /// struct for passing parameters to the method [`upsert_search_override`] @@ -282,10 +281,9 @@ pub struct UpsertSearchOverrideParams { /// The ID of the search override to create/update pub override_id: String, /// The search override object to be created/updated - pub search_override_schema: models::SearchOverrideSchema + pub search_override_schema: models::SearchOverrideSchema, } - /// struct for typed errors of method [`delete_document`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] @@ -399,12 +397,20 @@ pub enum UpsertSearchOverrideError { UnknownValue(serde_json::Value), } - /// Delete an individual document from a collection by using its ID. -pub async fn delete_document(configuration: &configuration::Configuration, params: DeleteDocumentParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), documentId=crate::apis::urlencode(params.document_id)); - let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); +pub async fn delete_document( + configuration: &configuration::Configuration, + params: DeleteDocumentParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/documents/{documentId}", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name), + documentId = crate::apis::urlencode(params.document_id) + ); + let mut req_builder = configuration + .client + .request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); @@ -439,15 +445,27 @@ pub async fn delete_document(configuration: &configuration::Configuration, param } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// Delete a bunch of documents that match a specific filter condition. Use the `batch_size` parameter to control the number of documents that should deleted at a time. A larger value will speed up deletions, but will impact performance of other operations running on the server. 
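A usage sketch of this filter-based delete through the high-level wrapper, mirroring step 14 of documents_test.rs above; the "books" collection, the helper name, and the `typesense_codegen::models::DeleteDocumentsParameters` import path are illustrative assumptions rather than part of this patch.

use typesense::client::Client;
use typesense_codegen::models::DeleteDocumentsParameters;

// Hypothetical helper: delete every document matching a filter from a "books" collection.
async fn purge_old_books(client: &Client) {
    let params = DeleteDocumentsParameters {
        filter_by: "publication_year:<1900".to_string(),
        ..Default::default()
    };
    let response = client
        .collection("books")
        .documents()
        .delete(params)
        .await
        .expect("bulk delete should succeed");
    // `num_deleted` reports how many documents matched the filter and were removed.
    println!("deleted {} documents", response.num_deleted);
}

The generated `DeleteDocumentsParams` above also exposes `batch_size` for tuning larger deletions.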
-pub async fn delete_documents(configuration: &configuration::Configuration, params: DeleteDocumentsParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); - let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); +pub async fn delete_documents( + configuration: &configuration::Configuration, + params: DeleteDocumentsParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/documents", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name) + ); + let mut req_builder = configuration + .client + .request(reqwest::Method::DELETE, &uri_str); if let Some(ref param_value) = params.filter_by { req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); @@ -494,14 +512,27 @@ pub async fn delete_documents(configuration: &configuration::Configuration, para } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } -pub async fn delete_search_override(configuration: &configuration::Configuration, params: DeleteSearchOverrideParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), overrideId=crate::apis::urlencode(params.override_id)); - let mut req_builder = configuration.client.request(reqwest::Method::DELETE, &uri_str); +pub async fn delete_search_override( + configuration: &configuration::Configuration, + params: DeleteSearchOverrideParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/overrides/{overrideId}", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name), + overrideId = crate::apis::urlencode(params.override_id) + ); + let mut req_builder = configuration + .client + .request(reqwest::Method::DELETE, &uri_str); if let Some(ref user_agent) = configuration.user_agent { req_builder = req_builder.header(reqwest::header::USER_AGENT, user_agent.clone()); @@ -536,14 +567,24 @@ pub async fn delete_search_override(configuration: &configuration::Configuration } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// Export all documents in a collection in JSON lines format. 
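A minimal sketch of the export call through the wrapper, mirroring step 13 of documents_test.rs above, which treats the response as a raw JSONL `String`; the collection name, helper name, and the `typesense_codegen::models::ExportDocumentsParameters` import path are illustrative assumptions.

use typesense::client::Client;
use typesense_codegen::models::ExportDocumentsParameters;

// Hypothetical helper: export the documents of a "books" collection that match a filter.
async fn export_recent_books(client: &Client) {
    let params = ExportDocumentsParameters {
        filter_by: Some("publication_year:>=2000".to_string()),
        ..Default::default()
    };
    let jsonl = client
        .collection("books")
        .documents()
        .export(params)
        .await
        .expect("export should succeed");
    // Each non-empty line is one document serialized as JSON.
    for line in jsonl.trim().lines() {
        println!("{line}");
    }
}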
-pub async fn export_documents(configuration: &configuration::Configuration, params: ExportDocumentsParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/documents/export", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); +pub async fn export_documents( + configuration: &configuration::Configuration, + params: ExportDocumentsParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/documents/export", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name) + ); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref param_value) = params.filter_by { @@ -580,22 +621,37 @@ pub async fn export_documents(configuration: &configuration::Configuration, para if !status.is_client_error() && !status.is_server_error() { let content = resp.text().await?; + // changed by hand match content_type { - ContentType::Json => serde_json::from_str(&content).map_err(Error::from), - ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `String`"))), - ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `String`")))), + ContentType::Json | ContentType::Text => Ok(content), + ContentType::Unsupported(unknown_type) => Err(Error::from( + serde_json::Error::custom(format!( + "Received `{unknown_type}` content type response that cannot be converted to `String`" + )), + )), } } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// Fetch an individual document from a collection by using its ID. -pub async fn get_document(configuration: &configuration::Configuration, params: GetDocumentParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), documentId=crate::apis::urlencode(params.document_id)); +pub async fn get_document( + configuration: &configuration::Configuration, + params: GetDocumentParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/documents/{documentId}", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name), + documentId = crate::apis::urlencode(params.document_id) + ); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -631,14 +687,25 @@ pub async fn get_document(configuration: &configuration::Configuration, params: } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// Retrieve the details of a search override, given its id. 
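Through the wrapper exercised in search_overrides_test.rs above, the same lookup can be sketched as follows; the "products" collection and the helper name are illustrative assumptions.

use typesense::client::Client;

// Hypothetical helper: fetch one curation override by id and print its rule.
async fn show_override(client: &Client, override_id: &str) {
    match client
        .collection("products")
        .search_override(override_id)
        .retrieve()
        .await
    {
        Ok(ov) => println!("override {} matches query {:?}", ov.id, ov.rule.query),
        Err(e) => eprintln!("could not retrieve override: {e:?}"),
    }
}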
-pub async fn get_search_override(configuration: &configuration::Configuration, params: GetSearchOverrideParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), overrideId=crate::apis::urlencode(params.override_id)); +pub async fn get_search_override( + configuration: &configuration::Configuration, + params: GetSearchOverrideParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/overrides/{overrideId}", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name), + overrideId = crate::apis::urlencode(params.override_id) + ); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -674,13 +741,23 @@ pub async fn get_search_override(configuration: &configuration::Configuration, p } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } -pub async fn get_search_overrides(configuration: &configuration::Configuration, params: GetSearchOverridesParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/overrides", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); +pub async fn get_search_overrides( + configuration: &configuration::Configuration, + params: GetSearchOverridesParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/overrides", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name) + ); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -716,15 +793,27 @@ pub async fn get_search_overrides(configuration: &configuration::Configuration, } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// The documents to be imported must be formatted in a newline delimited JSON structure. You can feed the output file from a Typesense export operation directly as import. 
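A sketch of building such a newline-delimited payload and importing it through the wrapper, mirroring step 10 of documents_test.rs and the multi_search test setup above; the "books" collection and the helper name are illustrative assumptions.

use serde_json::json;
use typesense::client::Client;
use typesense::models::IndexAction;
use typesense_codegen::models::ImportDocumentsParameters;

// Hypothetical helper: bulk-create two documents from a JSONL payload built with `json!`.
async fn import_books(client: &Client) {
    let payload = format!(
        "{}\n{}",
        json!({"title": "Foundation", "author": "Isaac Asimov", "publication_year": 1951}),
        json!({"title": "Dune", "author": "Frank Herbert", "publication_year": 1965})
    );
    let params = ImportDocumentsParameters {
        action: Some(IndexAction::Create),
        ..Default::default()
    };
    let report = client
        .collection("books")
        .documents()
        .import(payload, params)
        .await
        .expect("import should succeed");
    // The response body is returned as a raw string, one result line per input line.
    println!("{report}");
}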
-pub async fn import_documents(configuration: &configuration::Configuration, params: ImportDocumentsParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/documents/import", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); - let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); +pub async fn import_documents( + configuration: &configuration::Configuration, + params: ImportDocumentsParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/documents/import", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name) + ); + let mut req_builder = configuration + .client + .request(reqwest::Method::POST, &uri_str); if let Some(ref param_value) = params.batch_size { req_builder = req_builder.query(&[("batch_size", ¶m_value.to_string())]); @@ -733,7 +822,8 @@ pub async fn import_documents(configuration: &configuration::Configuration, para req_builder = req_builder.query(&[("return_id", ¶m_value.to_string())]); } if let Some(ref param_value) = params.remote_embedding_batch_size { - req_builder = req_builder.query(&[("remote_embedding_batch_size", ¶m_value.to_string())]); + req_builder = + req_builder.query(&[("remote_embedding_batch_size", ¶m_value.to_string())]); } if let Some(ref param_value) = params.return_doc { req_builder = req_builder.query(&[("return_doc", ¶m_value.to_string())]); @@ -755,7 +845,8 @@ pub async fn import_documents(configuration: &configuration::Configuration, para }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(¶ms.body); + // changed by hand + req_builder = req_builder.body(params.body); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -770,23 +861,39 @@ pub async fn import_documents(configuration: &configuration::Configuration, para if !status.is_client_error() && !status.is_server_error() { let content = resp.text().await?; + // changed by hand match content_type { - ContentType::Json => serde_json::from_str(&content).map_err(Error::from), - ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `String`"))), - ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `String`")))), + ContentType::Json | ContentType::Text => Ok(content), + ContentType::Unsupported(unknown_type) => Err(Error::from( + serde_json::Error::custom(format!( + "Received `{unknown_type}` content type response that cannot be converted to `String`" + )), + )), } } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// A document to be indexed in a given collection must conform to the schema of the collection. 
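The wrapper equivalent used in steps 2 and 3 of documents_test.rs above creates or upserts a single serde_json document; a minimal sketch, with the "books" collection, the helper name, and the debug-printed return value as illustrative assumptions.

use serde_json::json;
use typesense::client::Client;

// Hypothetical helper: index one document that conforms to the "books" schema
// (title, author, publication_year) used by the tests above.
async fn add_book(client: &Client) {
    let book = json!({
        "title": "The Hitchhiker's Guide to the Galaxy",
        "author": "Douglas Adams",
        "publication_year": 1979
    });
    let created = client
        .collection("books")
        .documents()
        .create(book)
        .await
        .expect("create should succeed");
    // `upsert` takes the same payload but overwrites an existing document with the same id.
    println!("indexed document: {created:?}");
}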
-pub async fn index_document(configuration: &configuration::Configuration, params: IndexDocumentParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); - let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); +pub async fn index_document( + configuration: &configuration::Configuration, + params: IndexDocumentParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/documents", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name) + ); + let mut req_builder = configuration + .client + .request(reqwest::Method::POST, &uri_str); if let Some(ref param_value) = params.action { req_builder = req_builder.query(&[("action", ¶m_value.to_string())]); @@ -828,15 +935,23 @@ pub async fn index_document(configuration: &configuration::Configuration, params } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// This is especially useful to avoid round-trip network latencies incurred otherwise if each of these requests are sent in separate HTTP requests. You can also use this feature to do a federated search across multiple collections in a single HTTP request. -pub async fn multi_search(configuration: &configuration::Configuration, params: MultiSearchParams) -> Result> { - +pub async fn multi_search( + configuration: &configuration::Configuration, + params: MultiSearchParams, +) -> Result> { let uri_str = format!("{}/multi_search", configuration.base_path); - let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); + let mut req_builder = configuration + .client + .request(reqwest::Method::POST, &uri_str); if let Some(ref param_value) = params.q { req_builder = req_builder.query(&[("q", ¶m_value.to_string())]); @@ -911,7 +1026,8 @@ pub async fn multi_search(configuration: &configuration::Configuration, params: req_builder = req_builder.query(&[("highlight_full_fields", ¶m_value.to_string())]); } if let Some(ref param_value) = params.highlight_affix_num_tokens { - req_builder = req_builder.query(&[("highlight_affix_num_tokens", ¶m_value.to_string())]); + req_builder = + req_builder.query(&[("highlight_affix_num_tokens", ¶m_value.to_string())]); } if let Some(ref param_value) = params.highlight_start_tag { req_builder = req_builder.query(&[("highlight_start_tag", ¶m_value.to_string())]); @@ -932,7 +1048,10 @@ pub async fn multi_search(configuration: &configuration::Configuration, params: req_builder = req_builder.query(&[("typo_tokens_threshold", ¶m_value.to_string())]); } if let Some(ref param_value) = params.enable_typos_for_alpha_numerical_tokens { - req_builder = req_builder.query(&[("enable_typos_for_alpha_numerical_tokens", ¶m_value.to_string())]); + req_builder = req_builder.query(&[( + "enable_typos_for_alpha_numerical_tokens", + ¶m_value.to_string(), + )]); } if let Some(ref param_value) = params.filter_curated_hits { req_builder = req_builder.query(&[("filter_curated_hits", ¶m_value.to_string())]); @@ -974,10 +1093,14 @@ pub async fn multi_search(configuration: &configuration::Configuration, params: req_builder = req_builder.query(&[("prioritize_token_position", ¶m_value.to_string())]); } if let Some(ref param_value) = 
params.prioritize_num_matching_fields {
-        req_builder = req_builder.query(&[("prioritize_num_matching_fields", &param_value.to_string())]);
+        req_builder =
+            req_builder.query(&[("prioritize_num_matching_fields", &param_value.to_string())]);
     }
     if let Some(ref param_value) = params.enable_typos_for_numerical_tokens {
-        req_builder = req_builder.query(&[("enable_typos_for_numerical_tokens", &param_value.to_string())]);
+        req_builder = req_builder.query(&[(
+            "enable_typos_for_numerical_tokens",
+            &param_value.to_string(),
+        )]);
     }
     if let Some(ref param_value) = params.exhaustive_search {
         req_builder = req_builder.query(&[("exhaustive_search", &param_value.to_string())]);
@@ -1001,10 +1124,12 @@ pub async fn multi_search(configuration: &configuration::Configuration, params:
         req_builder = req_builder.query(&[("vector_query", &param_value.to_string())]);
     }
     if let Some(ref param_value) = params.remote_embedding_timeout_ms {
-        req_builder = req_builder.query(&[("remote_embedding_timeout_ms", &param_value.to_string())]);
+        req_builder =
+            req_builder.query(&[("remote_embedding_timeout_ms", &param_value.to_string())]);
     }
     if let Some(ref param_value) = params.remote_embedding_num_tries {
-        req_builder = req_builder.query(&[("remote_embedding_num_tries", &param_value.to_string())]);
+        req_builder =
+            req_builder.query(&[("remote_embedding_num_tries", &param_value.to_string())]);
     }
     if let Some(ref param_value) = params.facet_strategy {
         req_builder = req_builder.query(&[("facet_strategy", &param_value.to_string())]);
@@ -1061,14 +1186,24 @@ pub async fn multi_search(configuration: &configuration::Configuration, params:
     } else {
         let content = resp.text().await?;
         let entity: Option<MultiSearchError> = serde_json::from_str(&content).ok();
-        Err(Error::ResponseError(ResponseContent { status, content, entity }))
+        Err(Error::ResponseError(ResponseContent {
+            status,
+            content,
+            entity,
+        }))
     }
 }
 
 /// Search for documents in a collection that match the search criteria.
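All of these generated calls surface HTTP failures through `Error::ResponseError`, carrying the status code, the raw body, and an optionally parsed typed entity, exactly as constructed in the error branch above. A hedged sketch of unpacking that for a failed `multi_search` call; the `MultiSearchError` name follows the generator's `{OperationId}Error` convention and is assumed to be exported from `documents_api`, and every other variant falls through to a catch-all arm:

```rust
use typesense_codegen::apis::{documents_api::MultiSearchError, Error};

/// Turn a failed `multi_search` call into a short, loggable message.
/// Only the `ResponseError` variant and the `ResponseContent` fields shown in
/// the diff above are relied upon.
fn describe_failure(err: &Error<MultiSearchError>) -> String {
    match err {
        Error::ResponseError(resp) => format!(
            "Typesense answered {} with body: {}",
            resp.status, resp.content
        ),
        other => format!("transport or (de)serialization failure: {other:?}"),
    }
}
```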
-pub async fn search_collection(configuration: &configuration::Configuration, params: SearchCollectionParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/documents/search", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); +pub async fn search_collection( + configuration: &configuration::Configuration, + params: SearchCollectionParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/documents/search", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name) + ); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); if let Some(ref param_value) = params.q { @@ -1153,7 +1288,8 @@ pub async fn search_collection(configuration: &configuration::Configuration, par req_builder = req_builder.query(&[("highlight_full_fields", ¶m_value.to_string())]); } if let Some(ref param_value) = params.highlight_affix_num_tokens { - req_builder = req_builder.query(&[("highlight_affix_num_tokens", ¶m_value.to_string())]); + req_builder = + req_builder.query(&[("highlight_affix_num_tokens", ¶m_value.to_string())]); } if let Some(ref param_value) = params.highlight_start_tag { req_builder = req_builder.query(&[("highlight_start_tag", ¶m_value.to_string())]); @@ -1177,7 +1313,10 @@ pub async fn search_collection(configuration: &configuration::Configuration, par req_builder = req_builder.query(&[("typo_tokens_threshold", ¶m_value.to_string())]); } if let Some(ref param_value) = params.enable_typos_for_alpha_numerical_tokens { - req_builder = req_builder.query(&[("enable_typos_for_alpha_numerical_tokens", ¶m_value.to_string())]); + req_builder = req_builder.query(&[( + "enable_typos_for_alpha_numerical_tokens", + ¶m_value.to_string(), + )]); } if let Some(ref param_value) = params.filter_curated_hits { req_builder = req_builder.query(&[("filter_curated_hits", ¶m_value.to_string())]); @@ -1225,10 +1364,14 @@ pub async fn search_collection(configuration: &configuration::Configuration, par req_builder = req_builder.query(&[("prioritize_token_position", ¶m_value.to_string())]); } if let Some(ref param_value) = params.prioritize_num_matching_fields { - req_builder = req_builder.query(&[("prioritize_num_matching_fields", ¶m_value.to_string())]); + req_builder = + req_builder.query(&[("prioritize_num_matching_fields", ¶m_value.to_string())]); } if let Some(ref param_value) = params.enable_typos_for_numerical_tokens { - req_builder = req_builder.query(&[("enable_typos_for_numerical_tokens", ¶m_value.to_string())]); + req_builder = req_builder.query(&[( + "enable_typos_for_numerical_tokens", + ¶m_value.to_string(), + )]); } if let Some(ref param_value) = params.exhaustive_search { req_builder = req_builder.query(&[("exhaustive_search", ¶m_value.to_string())]); @@ -1252,10 +1395,12 @@ pub async fn search_collection(configuration: &configuration::Configuration, par req_builder = req_builder.query(&[("vector_query", ¶m_value.to_string())]); } if let Some(ref param_value) = params.remote_embedding_timeout_ms { - req_builder = req_builder.query(&[("remote_embedding_timeout_ms", ¶m_value.to_string())]); + req_builder = + req_builder.query(&[("remote_embedding_timeout_ms", ¶m_value.to_string())]); } if let Some(ref param_value) = params.remote_embedding_num_tries { - req_builder = req_builder.query(&[("remote_embedding_num_tries", ¶m_value.to_string())]); + req_builder = + req_builder.query(&[("remote_embedding_num_tries", ¶m_value.to_string())]); } if let Some(ref 
param_value) = params.facet_strategy { req_builder = req_builder.query(&[("facet_strategy", ¶m_value.to_string())]); @@ -1311,15 +1456,28 @@ pub async fn search_collection(configuration: &configuration::Configuration, par } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// Update an individual document from a collection by using its ID. The update can be partial. -pub async fn update_document(configuration: &configuration::Configuration, params: UpdateDocumentParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/documents/{documentId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), documentId=crate::apis::urlencode(params.document_id)); - let mut req_builder = configuration.client.request(reqwest::Method::PATCH, &uri_str); +pub async fn update_document( + configuration: &configuration::Configuration, + params: UpdateDocumentParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/documents/{documentId}", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name), + documentId = crate::apis::urlencode(params.document_id) + ); + let mut req_builder = configuration + .client + .request(reqwest::Method::PATCH, &uri_str); if let Some(ref param_value) = params.dirty_values { req_builder = req_builder.query(&[("dirty_values", ¶m_value.to_string())]); @@ -1358,15 +1516,27 @@ pub async fn update_document(configuration: &configuration::Configuration, param } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// The filter_by query parameter is used to filter to specify a condition against which the documents are matched. The request body contains the fields that should be updated for any documents that match the filter condition. This endpoint is only available if the Typesense server is version `0.25.0.rc12` or later. 
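For comparison with the typed wrapper added later in this series, the reformatted single-document `update_document` above can be driven directly from the generated layer. A minimal sketch, assuming the `UpdateDocumentParams` fields the wrapper itself uses (`collection_name`, `document_id`, `body`, `dirty_values`) and that the call resolves to a `serde_json::Value`:

```rust
use typesense_codegen::apis::{configuration::Configuration, documents_api};

/// Partially update one document and return the server's view of the full document.
async fn set_out_of_stock(
    config: &Configuration,
    collection: &str,
    id: &str,
) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
    let params = documents_api::UpdateDocumentParams {
        collection_name: collection.to_string(),
        document_id: id.to_string(),
        // Only the fields present in the body are touched; the rest of the document is kept.
        body: serde_json::json!({ "in_stock": false }),
        dirty_values: None,
    };
    Ok(documents_api::update_document(config, params).await?)
}
```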
-pub async fn update_documents(configuration: &configuration::Configuration, params: UpdateDocumentsParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/documents", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name)); - let mut req_builder = configuration.client.request(reqwest::Method::PATCH, &uri_str); +pub async fn update_documents( + configuration: &configuration::Configuration, + params: UpdateDocumentsParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/documents", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name) + ); + let mut req_builder = configuration + .client + .request(reqwest::Method::PATCH, &uri_str); if let Some(ref param_value) = params.filter_by { req_builder = req_builder.query(&[("filter_by", ¶m_value.to_string())]); @@ -1405,14 +1575,25 @@ pub async fn update_documents(configuration: &configuration::Configuration, para } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// Create or update an override to promote certain documents over others. Using overrides, you can include or exclude specific documents for a given query. -pub async fn upsert_search_override(configuration: &configuration::Configuration, params: UpsertSearchOverrideParams) -> Result> { - - let uri_str = format!("{}/collections/{collectionName}/overrides/{overrideId}", configuration.base_path, collectionName=crate::apis::urlencode(params.collection_name), overrideId=crate::apis::urlencode(params.override_id)); +pub async fn upsert_search_override( + configuration: &configuration::Configuration, + params: UpsertSearchOverrideParams, +) -> Result> { + let uri_str = format!( + "{}/collections/{collectionName}/overrides/{overrideId}", + configuration.base_path, + collectionName = crate::apis::urlencode(params.collection_name), + overrideId = crate::apis::urlencode(params.override_id) + ); let mut req_builder = configuration.client.request(reqwest::Method::PUT, &uri_str); if let Some(ref user_agent) = configuration.user_agent { @@ -1449,7 +1630,10 @@ pub async fn upsert_search_override(configuration: &configuration::Configuration } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } - diff --git a/typesense_codegen/src/apis/stemming_api.rs b/typesense_codegen/src/apis/stemming_api.rs index 2432031..d8aaf50 100644 --- a/typesense_codegen/src/apis/stemming_api.rs +++ b/typesense_codegen/src/apis/stemming_api.rs @@ -4,21 +4,20 @@ * An open source search engine for building delightful search experiences. 
 *
 * The version of the OpenAPI document: 28.0
- * 
+ *
 * Generated by: https://openapi-generator.tech
 */
 
-
-use reqwest;
-use serde::{Deserialize, Serialize, de::Error as _};
+use super::{configuration, ContentType, Error};
 use crate::{apis::ResponseContent, models};
-use super::{Error, configuration, ContentType};
+use reqwest;
+use serde::{de::Error as _, Deserialize, Serialize};
 
 /// struct for passing parameters to the method [`get_stemming_dictionary`]
 #[derive(Clone, Debug)]
 pub struct GetStemmingDictionaryParams {
     /// The ID of the dictionary to retrieve
-    pub dictionary_id: String
+    pub dictionary_id: String,
 }
 
 /// struct for passing parameters to the method [`import_stemming_dictionary`]
@@ -27,10 +26,9 @@ pub struct ImportStemmingDictionaryParams {
     /// The ID to assign to the dictionary
     pub id: String,
     /// The JSONL file containing word mappings
-    pub body: String
+    pub body: String,
 }
 
-
 /// struct for typed errors of method [`get_stemming_dictionary`]
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(untagged)]
@@ -54,11 +52,16 @@ pub enum ListStemmingDictionariesError {
     UnknownValue(serde_json::Value),
 }
 
-
 /// Fetch details of a specific stemming dictionary.
-pub async fn get_stemming_dictionary(configuration: &configuration::Configuration, params: GetStemmingDictionaryParams) -> Result<models::StemmingDictionary, Error<GetStemmingDictionaryError>> {
-
-    let uri_str = format!("{}/stemming/dictionaries/{dictionaryId}", configuration.base_path, dictionaryId=crate::apis::urlencode(params.dictionary_id));
+pub async fn get_stemming_dictionary(
+    configuration: &configuration::Configuration,
+    params: GetStemmingDictionaryParams,
+) -> Result<models::StemmingDictionary, Error<GetStemmingDictionaryError>> {
+    let uri_str = format!(
+        "{}/stemming/dictionaries/{dictionaryId}",
+        configuration.base_path,
+        dictionaryId = crate::apis::urlencode(params.dictionary_id)
+    );
     let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str);
 
     if let Some(ref user_agent) = configuration.user_agent {
@@ -94,15 +97,23 @@ pub async fn get_stemming_dictionary(configuratio
     } else {
         let content = resp.text().await?;
         let entity: Option<GetStemmingDictionaryError> = serde_json::from_str(&content).ok();
-        Err(Error::ResponseError(ResponseContent { status, content, entity }))
+        Err(Error::ResponseError(ResponseContent {
+            status,
+            content,
+            entity,
+        }))
     }
 }
 
 /// Upload a JSONL file containing word mappings to create or update a stemming dictionary.
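The `get_stemming_dictionary` function above only needs the `dictionary_id` from its params struct. A small usage sketch, assuming the success type resolves to `models::StemmingDictionary` (the generic arguments were lost in this hunk):

```rust
use typesense_codegen::{
    apis::{configuration::Configuration, stemming_api},
    models,
};

/// Fetch one stemming dictionary by its ID.
async fn fetch_dictionary(
    config: &Configuration,
    id: &str,
) -> Result<models::StemmingDictionary, Box<dyn std::error::Error>> {
    let params = stemming_api::GetStemmingDictionaryParams {
        dictionary_id: id.to_string(),
    };
    Ok(stemming_api::get_stemming_dictionary(config, params).await?)
}
```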
-pub async fn import_stemming_dictionary(configuration: &configuration::Configuration, params: ImportStemmingDictionaryParams) -> Result> { - +pub async fn import_stemming_dictionary( + configuration: &configuration::Configuration, + params: ImportStemmingDictionaryParams, +) -> Result> { let uri_str = format!("{}/stemming/dictionaries/import", configuration.base_path); - let mut req_builder = configuration.client.request(reqwest::Method::POST, &uri_str); + let mut req_builder = configuration + .client + .request(reqwest::Method::POST, &uri_str); req_builder = req_builder.query(&[("id", ¶ms.id.to_string())]); if let Some(ref user_agent) = configuration.user_agent { @@ -116,7 +127,8 @@ pub async fn import_stemming_dictionary(configuration: &configuration::Configura }; req_builder = req_builder.header("X-TYPESENSE-API-KEY", value); }; - req_builder = req_builder.json(¶ms.body); + // changed by hand + req_builder = req_builder.body(params.body); let req = req_builder.build()?; let resp = configuration.client.execute(req).await?; @@ -131,21 +143,30 @@ pub async fn import_stemming_dictionary(configuration: &configuration::Configura if !status.is_client_error() && !status.is_server_error() { let content = resp.text().await?; + // changed by hand match content_type { - ContentType::Json => serde_json::from_str(&content).map_err(Error::from), - ContentType::Text => return Err(Error::from(serde_json::Error::custom("Received `text/plain` content type response that cannot be converted to `String`"))), - ContentType::Unsupported(unknown_type) => return Err(Error::from(serde_json::Error::custom(format!("Received `{unknown_type}` content type response that cannot be converted to `String`")))), + ContentType::Json | ContentType::Text => Ok(content), + ContentType::Unsupported(unknown_type) => Err(Error::from( + serde_json::Error::custom(format!( + "Received `{unknown_type}` content type response that cannot be converted to `String`" + )), + )), } } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } /// Retrieve a list of all available stemming dictionaries. 
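Like the documents import, `import_stemming_dictionary` now takes the JSONL payload as a plain `String` in `params.body` and returns the raw response. A hedged sketch of building that payload; the `word`/`root` field names and the dictionary ID follow the Typesense docs and are assumptions here:

```rust
use typesense_codegen::apis::{configuration::Configuration, stemming_api};

/// Create or update a stemming dictionary from in-memory word mappings.
async fn upload_dictionary(config: &Configuration) -> Result<String, Box<dyn std::error::Error>> {
    let mappings = [("people", "person"), ("children", "child")];
    let body = mappings
        .iter()
        .map(|(word, root)| serde_json::json!({ "word": word, "root": root }).to_string())
        .collect::<Vec<_>>()
        .join("\n");

    let params = stemming_api::ImportStemmingDictionaryParams {
        id: "irregular-plurals".to_string(),
        body,
    };
    // With the hand edit above, the raw JSONL response comes back as a `String`.
    Ok(stemming_api::import_stemming_dictionary(config, params).await?)
}
```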
-pub async fn list_stemming_dictionaries(configuration: &configuration::Configuration) -> Result> { - +pub async fn list_stemming_dictionaries( + configuration: &configuration::Configuration, +) -> Result> { let uri_str = format!("{}/stemming/dictionaries", configuration.base_path); let mut req_builder = configuration.client.request(reqwest::Method::GET, &uri_str); @@ -182,7 +203,10 @@ pub async fn list_stemming_dictionaries(configuration: &configuration::Configura } else { let content = resp.text().await?; let entity: Option = serde_json::from_str(&content).ok(); - Err(Error::ResponseError(ResponseContent { status, content, entity })) + Err(Error::ResponseError(ResponseContent { + status, + content, + entity, + })) } } - From 0e4002c8ec419c3015b2cc6db890b42a41f4dfb1 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Tue, 29 Jul 2025 15:17:34 +0700 Subject: [PATCH 06/21] feat: generic collection and library module restructuring --- typesense/src/client/alias.rs | 2 +- typesense/src/client/aliases.rs | 2 +- typesense/src/client/analytics/events.rs | 2 +- typesense/src/client/analytics/mod.rs | 2 +- typesense/src/client/analytics/rule.rs | 2 +- typesense/src/client/analytics/rules.rs | 2 +- typesense/src/client/collection/document.rs | 125 ++++++-- typesense/src/client/collection/documents.rs | 86 ++++-- typesense/src/client/collection/mod.rs | 42 +-- .../src/client/collection/search_override.rs | 2 +- .../src/client/collection/search_overrides.rs | 2 +- typesense/src/client/collection/synonym.rs | 2 +- typesense/src/client/collection/synonyms.rs | 2 +- typesense/src/client/collections.rs | 2 +- typesense/src/client/conversations/mod.rs | 8 +- typesense/src/client/conversations/model.rs | 2 +- typesense/src/client/conversations/models.rs | 2 +- typesense/src/client/key.rs | 2 +- typesense/src/client/keys.rs | 2 +- typesense/src/client/mod.rs | 267 +++++++++--------- typesense/src/client/multi_search.rs | 2 +- typesense/src/client/operations.rs | 2 +- typesense/src/client/preset.rs | 2 +- typesense/src/client/presets.rs | 2 +- typesense/src/client/stopword.rs | 2 +- typesense/src/client/stopwords.rs | 2 +- typesense/src/error.rs | 54 ++++ typesense/src/lib.rs | 10 +- typesense/src/models/mod.rs | 111 ++++++++ typesense/src/models/search.rs | 144 ++++++++++ typesense/tests/client/aliases_test.rs | 2 +- typesense/tests/client/analytics_test.rs | 3 +- typesense/tests/client/client_test.rs | 2 +- typesense/tests/client/collections_test.rs | 2 +- .../tests/client/conversation_models_test.rs | 11 +- typesense/tests/client/documents_test.rs | 172 +++++++++-- typesense/tests/client/keys_test.rs | 2 +- typesense/tests/client/mod.rs | 2 +- typesense/tests/client/multi_search_test.rs | 4 +- typesense/tests/client/presets_test.rs | 2 +- .../tests/client/search_overrides_test.rs | 2 +- typesense/tests/client/stopwords_test.rs | 2 +- typesense/tests/client/synonyms_test.rs | 2 +- 43 files changed, 825 insertions(+), 272 deletions(-) create mode 100644 typesense/src/error.rs create mode 100644 typesense/src/models/mod.rs create mode 100644 typesense/src/models/search.rs diff --git a/typesense/src/client/alias.rs b/typesense/src/client/alias.rs index c49bf86..53b0360 100644 --- a/typesense/src/client/alias.rs +++ b/typesense/src/client/alias.rs @@ -2,7 +2,7 @@ //! //! An `Alias` instance is created via the main `client.alias()` method. 
-use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{collections_api, configuration}, diff --git a/typesense/src/client/aliases.rs b/typesense/src/client/aliases.rs index 53594ce..4420d94 100644 --- a/typesense/src/client/aliases.rs +++ b/typesense/src/client/aliases.rs @@ -2,7 +2,7 @@ //! //! An `Aliases` instance is created via the main `client.aliases()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{collections_api, configuration}, diff --git a/typesense/src/client/analytics/events.rs b/typesense/src/client/analytics/events.rs index 55bafac..b07af68 100644 --- a/typesense/src/client/analytics/events.rs +++ b/typesense/src/client/analytics/events.rs @@ -2,7 +2,7 @@ //! //! An `Events` instance is created via the `Client::analytics().events()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{analytics_api, configuration}, diff --git a/typesense/src/client/analytics/mod.rs b/typesense/src/client/analytics/mod.rs index cb49810..1dc5f93 100644 --- a/typesense/src/client/analytics/mod.rs +++ b/typesense/src/client/analytics/mod.rs @@ -4,7 +4,7 @@ pub mod events; pub mod rule; pub mod rules; -use super::{Client, Error}; +use crate::Client; pub use events::Events; pub use rule::Rule; pub use rules::Rules; diff --git a/typesense/src/client/analytics/rule.rs b/typesense/src/client/analytics/rule.rs index 7fc8222..b1c1372 100644 --- a/typesense/src/client/analytics/rule.rs +++ b/typesense/src/client/analytics/rule.rs @@ -2,7 +2,7 @@ //! //! An `Rule` instance is created via the `Client::analytics().rule("rule_name")` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{analytics_api, configuration}, diff --git a/typesense/src/client/analytics/rules.rs b/typesense/src/client/analytics/rules.rs index 8425048..adfbd55 100644 --- a/typesense/src/client/analytics/rules.rs +++ b/typesense/src/client/analytics/rules.rs @@ -2,7 +2,7 @@ //! //! An `Rules` instance is created via the `Client::analytics().rules()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{analytics_api, configuration}, diff --git a/typesense/src/client/collection/document.rs b/typesense/src/client/collection/document.rs index 8ff1cd5..0f2e48d 100644 --- a/typesense/src/client/collection/document.rs +++ b/typesense/src/client/collection/document.rs @@ -2,84 +2,157 @@ //! //! An instance of `Document` is scoped to a specific document and is created //! via a parent `Collection` struct, for example: -//! `client.collection("collection_name").document("document_id")` +//! `client.collection::("books").document("123")` -use super::{Client, Error}; +use crate::{Client, Error}; +use serde::{de::DeserializeOwned, Serialize}; use std::sync::Arc; -use typesense_codegen::apis::{configuration, documents_api}; +use typesense_codegen::{ + apis::{configuration, documents_api}, + models, +}; /// Provides methods for interacting with a single document within a specific Typesense collection. /// -/// This struct is created by calling a method like `client.collection("collection_name").document("document_id")`. -pub struct Document<'a> { +/// This struct is created by calling a method like `client.collection::("collection_name").document("document_id")`. 
+/// The generic `T` represents the shape of the document and must implement `Serialize` and `DeserializeOwned`. +/// If `T` is not specified, it defaults to `serde_json::Value` for schemaless interactions. +pub struct Document<'a, T = serde_json::Value> +where + T: DeserializeOwned + Serialize + Send + Sync, +{ pub(super) client: &'a Client, pub(super) collection_name: &'a str, pub(super) document_id: &'a str, + pub(super) _phantom: std::marker::PhantomData, } -impl<'a> Document<'a> { +impl<'a, T> Document<'a, T> +where + T: DeserializeOwned + Serialize + Send + Sync, +{ /// Creates a new `Document` instance for a specific document ID. - /// This is intended for internal use by the parent `Documents` struct. + /// This is intended for internal use by the parent `Collection` struct. pub(super) fn new(client: &'a Client, collection_name: &'a str, document_id: &'a str) -> Self { Self { client, collection_name, document_id, + _phantom: std::marker::PhantomData, } } - /// Fetches this individual document from the collection. - pub async fn retrieve( - &self, - ) -> Result> { + /// Fetches this individual document from the collection and deserializes it into `T`. + /// + /// # Returns + /// A `Result` containing the strongly-typed document `T` if successful. + pub async fn retrieve(&self) -> Result> { let params = documents_api::GetDocumentParams { collection_name: self.collection_name.to_string(), document_id: self.document_id.to_string(), }; - self.client + let result_value = self + .client .execute(|config: Arc| { let params_for_move = params.clone(); async move { documents_api::get_document(&config, params_for_move).await } }) - .await + .await?; + + // Deserialize the raw JSON value into the user's type T. + serde_json::from_value(result_value).map_err(Error::from) } /// Updates this individual document. The update can be partial. + /// The updated full document is returned. /// /// # Arguments - /// * `document` - A `serde_json::Value` containing the fields to update. - pub async fn update( + /// * `partial_document` - A serializable struct or a `serde_json::Value` containing the fields to update. + /// For example: `serde_json::json!({ "in_stock": false })`. + /// * `params` - An optional `DocumentIndexParameters` struct to specify additional + /// parameters, such as `dirty_values` which determines what Typesense should do when the type of a particular field being indexed does not match the previously inferred type for that field, or the one defined in the collection's schema. + /// + /// # Returns + /// A `Result` containing the full, updated document deserialized into `T`. 
+ /// + /// # Example + /// ```no_run + /// # use serde::{Serialize, Deserialize}; + /// # use typesense::{Client, MultiNodeConfiguration, models}; + /// # use reqwest::Url; + /// # #[derive(Serialize, Deserialize)] + /// # struct Book { id: String, title: String, pages: i32 } + /// # + /// # async fn run() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// let book_update = serde_json::json!({ "pages": 654 }); + /// + /// // Simple update + /// let updated_book = client.collection_of::("books").document("123") + /// .update(&book_update, None) + /// .await?; + /// + /// // Update with additional parameters + /// let params = models::DocumentIndexParameters { + /// dirty_values: Some(models::DirtyValues::CoerceOrReject), + /// }; + /// let updated_book_with_params = client.collection_of::("books").document("124") + /// .update(&book_update, Some(params)) + /// .await?; + /// # + /// # Ok(()) + /// # } + /// ``` + pub async fn update( &self, - document: serde_json::Value, - ) -> Result> { + partial_document: &U, + params: Option, + ) -> Result> { let params = documents_api::UpdateDocumentParams { collection_name: self.collection_name.to_string(), document_id: self.document_id.to_string(), - body: document, - dirty_values: None, + body: serde_json::to_value(partial_document)?, + dirty_values: params.unwrap_or_default().dirty_values, }; - self.client + + let result_value = self + .client .execute(|config: Arc| { let params_for_move = params.clone(); async move { documents_api::update_document(&config, params_for_move).await } }) - .await + .await?; + + // Deserialize the raw JSON value of the updated document into T. + serde_json::from_value(result_value).map_err(Error::from) } /// Deletes this individual document from the collection. - pub async fn delete( - &self, - ) -> Result> { + /// The deleted document is returned. + /// + /// # Returns + /// A `Result` containing the deleted document deserialized into `T`. + pub async fn delete(&self) -> Result> { let params = documents_api::DeleteDocumentParams { collection_name: self.collection_name.to_string(), document_id: self.document_id.to_string(), }; - self.client + + let result_value = self + .client .execute(|config: Arc| { let params_for_move = params.clone(); async move { documents_api::delete_document(&config, params_for_move).await } }) - .await + .await?; + + // Deserialize the raw JSON value of the deleted document into T. + serde_json::from_value(result_value).map_err(Error::from) } } diff --git a/typesense/src/client/collection/documents.rs b/typesense/src/client/collection/documents.rs index e4b7360..d36d6fa 100644 --- a/typesense/src/client/collection/documents.rs +++ b/typesense/src/client/collection/documents.rs @@ -1,27 +1,38 @@ //! Provides access to the document, search, and override-related API endpoints. //! //! An instance of `Documents` is scoped to a specific collection and is created -//! via the main `client.collection("collection_name").documents()` method. +//! via the main `client.collection("collection_name").documents()` method or +//! `client.collection_of::("...").documents()`. 
-use super::{Client, Error}; +use crate::models::SearchResult; +use crate::{Client, Error}; +use serde::{de::DeserializeOwned, Serialize}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, models::{ - self, DeleteDocumentsParameters, ExportDocumentsParameters, ImportDocumentsParameters, - UpdateDocumentsParameters, + self as raw_models, DeleteDocumentsParameters, DocumentIndexParameters, + ExportDocumentsParameters, ImportDocumentsParameters, UpdateDocumentsParameters, }, }; - /// Provides methods for interacting with documents within a specific Typesense collection. /// -/// This struct is created by calling `client.collection("collection_name").documents("collection_name")`. -pub struct Documents<'a> { +/// This struct is generic over the document type `T`. If created via `client.collection(...)`, +/// `T` defaults to `serde_json::Value`. If created via `client.collection_of::(...)`, +/// `T` will be `MyType`. +pub struct Documents<'a, T = serde_json::Value> +where + T: DeserializeOwned + Serialize + Send + Sync, +{ pub(super) client: &'a Client, pub(super) collection_name: &'a str, + pub(super) _phantom: std::marker::PhantomData, } -impl<'a> Documents<'a> { +impl<'a, T> Documents<'a, T> +where + T: DeserializeOwned + Serialize + Send + Sync, +{ /// Creates a new `Documents` instance. /// /// This is typically called by `Client::documents()`. @@ -29,6 +40,7 @@ impl<'a> Documents<'a> { Self { client, collection_name, + _phantom: std::marker::PhantomData, } } @@ -43,12 +55,13 @@ impl<'a> Documents<'a> { &self, document: serde_json::Value, action: &str, + params: Option, ) -> Result> { let params = documents_api::IndexDocumentParams { collection_name: self.collection_name.to_string(), body: document, action: Some(action.to_string()), - dirty_values: None, // Or expose this as an argument if needed + dirty_values: params.unwrap_or_default().dirty_values, // Or expose this as an argument if needed }; self.client .execute(|config: Arc| { @@ -59,30 +72,40 @@ impl<'a> Documents<'a> { } /// Creates a new document in the collection. - /// Fails if a document with the same id already exists /// - /// If the document has an `id` field of type `string`, it will be used as the document's ID. - /// Otherwise, Typesense will auto-generate an ID. + /// Fails if a document with the same ID already exists. If the document has an `id` field + /// of type `string`, it will be used as the document's ID. Otherwise, Typesense will + /// auto-generate an ID. The newly indexed document is returned. /// /// # Arguments - /// * `document` - A `serde_json::Value` representing the document to create. + /// * `document` - A reference to the document to create. + /// * `params` - Optional parameters like `dirty_values`. pub async fn create( &self, - document: serde_json::Value, - ) -> Result> { - self.index(document, "create").await + document: &T, + params: Option, + ) -> Result> { + let doc_value = serde_json::to_value(document)?; + let result_value = self.index(doc_value, "create", params).await?; + serde_json::from_value(result_value).map_err(Error::from) } - /// Creates a new document or updates an existing document if a document with the same id already exists. - /// Requires the whole document to be sent. For partial updates, use the `update()` action. + /// Creates a new document or updates an existing one if an ID match is found. + /// + /// This method requires the full document to be sent. For partial updates, use + /// `collection.document("...").update()`. 
The indexed document is returned. /// /// # Arguments - /// * `document` - A `serde_json::Value` representing the document to upsert. + /// * `document` - A reference to the document to upsert. + /// * `params` - Optional parameters like `dirty_values`. pub async fn upsert( &self, - document: serde_json::Value, - ) -> Result> { - self.index(document, "upsert").await + document: &T, + params: Option, + ) -> Result> { + let doc_value = serde_json::to_value(document)?; + let result_value = self.index(doc_value, "upsert", params).await?; + serde_json::from_value(result_value).map_err(Error::from) } // --- Bulk Operation Methods --- @@ -149,7 +172,7 @@ impl<'a> Documents<'a> { pub async fn delete( &self, params: DeleteDocumentsParameters, - ) -> Result> + ) -> Result> { let params = documents_api::DeleteDocumentsParams { collection_name: self.collection_name.to_string(), @@ -175,7 +198,7 @@ impl<'a> Documents<'a> { &self, document: serde_json::Value, params: UpdateDocumentsParameters, - ) -> Result> + ) -> Result> { let params = documents_api::UpdateDocumentsParams { collection_name: self.collection_name.to_string(), @@ -191,15 +214,14 @@ impl<'a> Documents<'a> { } /// Searches for documents in the collection that match the given criteria. + /// The search results will have their `document` field deserialized into type `T`. /// /// # Arguments /// * `params` - A `SearchParameters` struct containing all search parameters. - /// you can construct it like this: - /// `SearchParameters { q: Some("...".into()), query_by: Some("...".into()), ..Default::default() }` pub async fn search( &self, - params: models::SearchParameters, - ) -> Result> { + params: raw_models::SearchParameters, + ) -> Result, Error> { let search_params = documents_api::SearchCollectionParams { collection_name: self.collection_name.to_string(), @@ -275,11 +297,15 @@ impl<'a> Documents<'a> { nl_query: params.nl_query, }; - self.client + let raw_result = self + .client .execute(|config: Arc| { let params_for_move = search_params.clone(); async move { documents_api::search_collection(&config, params_for_move).await } }) - .await + .await?; + + // Transform the raw API result into our generic, typed SearchResult. + SearchResult::from_raw(raw_result).map_err(Error::from) } } diff --git a/typesense/src/client/collection/mod.rs b/typesense/src/client/collection/mod.rs index 3acad6f..9644ddf 100644 --- a/typesense/src/client/collection/mod.rs +++ b/typesense/src/client/collection/mod.rs @@ -2,20 +2,20 @@ //! //! A `Collections` instance is created via the main `Client::collections()` method. -pub mod document; -pub mod documents; -pub mod search_override; -pub mod search_overrides; -pub mod synonym; -pub mod synonyms; -use super::{Client, Error}; -pub use document::Document; -pub use documents::Documents; -pub use search_override::SearchOverride; -pub use search_overrides::SearchOverrides; +mod document; +mod documents; +mod search_override; +mod search_overrides; +mod synonym; +mod synonyms; +use crate::{Client, Error}; + +use search_override::SearchOverride; +use search_overrides::SearchOverrides; +use serde::{de::DeserializeOwned, Serialize}; use std::sync::Arc; -pub use synonym::Synonym; -pub use synonyms::Synonyms; +use synonym::Synonym; +use synonyms::Synonyms; use typesense_codegen::{ apis::{collections_api, configuration}, models, @@ -24,28 +24,36 @@ use typesense_codegen::{ /// Provides methods for interacting with a Typesense collection. /// /// This struct is created by calling `client.collection("collection_name")`. 
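The typed `create` and `upsert` methods above serialize the caller's struct with `serde_json::to_value` and deserialize the indexed document back into `T`. A usage sketch against the public API introduced in this patch; the `Book` struct and field values are illustrative:

```rust
use serde::{Deserialize, Serialize};
use typesense::{models, Client};

#[derive(Serialize, Deserialize, Debug)]
struct Book {
    id: String,
    title: String,
    pages: i32,
}

async fn index_books(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
    let books = client.collection_of::<Book>("books");
    let documents = books.documents();

    // `create` fails if a document with the same id already exists...
    let created = documents
        .create(
            &Book { id: "1".into(), title: "Dune".into(), pages: 412 },
            None,
        )
        .await?;
    println!("created {:?}", created);

    // ...while `upsert` replaces it, optionally coercing mismatched field types.
    let params = models::DocumentIndexParameters {
        dirty_values: Some(models::DirtyValues::CoerceOrReject),
    };
    documents
        .upsert(
            &Book { id: "1".into(), title: "Dune".into(), pages: 658 },
            Some(params),
        )
        .await?;
    Ok(())
}
```

Both methods are thin wrappers over the same `index()` call, so the only behavioural difference is the `action` string they pass.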
-pub struct Collection<'a> { +pub struct Collection<'a, T = serde_json::Value> +where + T: DeserializeOwned + Serialize + Send + Sync, +{ pub(super) client: &'a Client, pub(super) collection_name: &'a str, + pub(super) _phantom: std::marker::PhantomData, } -impl<'a> Collection<'a> { +impl<'a, T> Collection<'a, T> +where + T: DeserializeOwned + Serialize + Send + Sync, +{ /// Creates a new `Collection` instance. pub(super) fn new(client: &'a Client, collection_name: &'a str) -> Self { Self { client, collection_name, + _phantom: std::marker::PhantomData, } } // --- Documents Accessors --- /// Provides access to the document-related API endpoints for a specific collection. - pub fn documents(&'a self) -> documents::Documents<'a> { + pub fn documents(&'a self) -> documents::Documents<'a, T> { documents::Documents::new(self.client, self.collection_name) } /// Provides access to the API endpoints for a single document within a Typesense collection. - pub fn document(&'a self, document_id: &'a str) -> document::Document<'a> { + pub fn document(&'a self, document_id: &'a str) -> document::Document<'a, T> { document::Document::new(self.client, self.collection_name, document_id) } diff --git a/typesense/src/client/collection/search_override.rs b/typesense/src/client/collection/search_override.rs index 9805a5d..5648e6d 100644 --- a/typesense/src/client/collection/search_override.rs +++ b/typesense/src/client/collection/search_override.rs @@ -2,7 +2,7 @@ //! //! An instance of `SearchOverride` is created via the `Client::collection("collection_name").search_override("search_override_id")` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, diff --git a/typesense/src/client/collection/search_overrides.rs b/typesense/src/client/collection/search_overrides.rs index 11151a1..b439358 100644 --- a/typesense/src/client/collection/search_overrides.rs +++ b/typesense/src/client/collection/search_overrides.rs @@ -2,7 +2,7 @@ //! //! An instance of `SearchOverrides` is created via the `Client::collection("collection_name").search_overrides()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, diff --git a/typesense/src/client/collection/synonym.rs b/typesense/src/client/collection/synonym.rs index d3b9b05..847b7dd 100644 --- a/typesense/src/client/collection/synonym.rs +++ b/typesense/src/client/collection/synonym.rs @@ -2,7 +2,7 @@ //! //! An instance of `Synonym` is created via the `client.collection("collection_name").synonym("synonym_id")` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, synonyms_api}, diff --git a/typesense/src/client/collection/synonyms.rs b/typesense/src/client/collection/synonyms.rs index a78300b..513fecc 100644 --- a/typesense/src/client/collection/synonyms.rs +++ b/typesense/src/client/collection/synonyms.rs @@ -2,7 +2,7 @@ //! //! An instance of `Synonyms` is created via the `client.collection("collection_name").synonyms()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, synonyms_api}, diff --git a/typesense/src/client/collections.rs b/typesense/src/client/collections.rs index 9378810..0036c3d 100644 --- a/typesense/src/client/collections.rs +++ b/typesense/src/client/collections.rs @@ -2,7 +2,7 @@ //! //! 
A `Collections` instance is created via the main `client.collections()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{collections_api, configuration}, diff --git a/typesense/src/client/conversations/mod.rs b/typesense/src/client/conversations/mod.rs index 392070b..8360363 100644 --- a/typesense/src/client/conversations/mod.rs +++ b/typesense/src/client/conversations/mod.rs @@ -3,11 +3,11 @@ //! An `Conversations` instance is created via the main `Client::conversations()` method. use super::Client; -pub use model::Model; -pub use models::Models; +use model::Model; +use models::Models; -pub mod model; -pub mod models; +mod model; +mod models; /// Provides methods for managing Typesense conversation models. /// diff --git a/typesense/src/client/conversations/model.rs b/typesense/src/client/conversations/model.rs index 79540bb..33bf34e 100644 --- a/typesense/src/client/conversations/model.rs +++ b/typesense/src/client/conversations/model.rs @@ -2,7 +2,7 @@ //! //! An instance of `Model` is created via the `Conversations::model()` method. -use crate::client::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, conversations_api}, diff --git a/typesense/src/client/conversations/models.rs b/typesense/src/client/conversations/models.rs index d4b585d..e093a5b 100644 --- a/typesense/src/client/conversations/models.rs +++ b/typesense/src/client/conversations/models.rs @@ -2,7 +2,7 @@ //! //! An instance of `Models` is created via the `Conversations::models()` method. -use crate::client::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, conversations_api}, diff --git a/typesense/src/client/key.rs b/typesense/src/client/key.rs index dcd33ef..77f7e27 100644 --- a/typesense/src/client/key.rs +++ b/typesense/src/client/key.rs @@ -2,7 +2,7 @@ //! //! A `Key` instance is created via the `Client::key(key_id)` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, keys_api}, diff --git a/typesense/src/client/keys.rs b/typesense/src/client/keys.rs index 004ae7c..bd448b4 100644 --- a/typesense/src/client/keys.rs +++ b/typesense/src/client/keys.rs @@ -2,7 +2,7 @@ //! //! An `Keys` instance is created via the `Client::keys()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, keys_api}, diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index 1613178..4eda539 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -14,8 +14,7 @@ //! ## Example Usage //! //! ```no_run -//! use typesense::client::{Client, MultiNodeConfiguration}; -//! use typesense_codegen::models; +//! use typesense::{Client, MultiNodeConfiguration, models}; //! use reqwest::Url; //! use reqwest_retry::policies::ExponentialBackoff; //! use std::time::Duration; @@ -50,37 +49,40 @@ //! } //! 
``` -pub mod alias; -pub mod aliases; -pub mod analytics; -pub mod collection; -pub mod collections; -pub mod conversations; -pub mod key; -pub mod keys; -pub mod multi_search; -pub mod operations; -pub mod preset; -pub mod presets; -pub mod stemming; -pub mod stopword; -pub mod stopwords; - -pub use alias::Alias; -pub use aliases::Aliases; -pub use analytics::Analytics; -pub use collection::Collection; -pub use collections::Collections; -pub use conversations::Conversations; -pub use key::Key; -pub use keys::Keys; -pub use operations::Operations; -pub use preset::Preset; -pub use presets::Presets; -pub use stemming::Stemming; -pub use stopword::Stopword; -pub use stopwords::Stopwords; - +mod alias; +mod aliases; +mod analytics; +mod collection; +mod collections; +mod conversations; +mod key; +mod keys; +mod multi_search; +mod operations; +mod preset; +mod presets; +mod stemming; +mod stopword; +mod stopwords; + +use alias::Alias; +use aliases::Aliases; +use analytics::Analytics; +use collection::Collection; +use collections::Collections; +use conversations::Conversations; +use key::Key; +use keys::Keys; +use operations::Operations; +use preset::Preset; +use presets::Presets; +use serde::de::DeserializeOwned; +use serde::Serialize; +use stemming::Stemming; +use stopword::Stopword; +use stopwords::Stopwords; + +use crate::Error; use reqwest::Url; use reqwest_middleware::ClientBuilder; use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; @@ -90,7 +92,6 @@ use std::sync::{ Arc, Mutex, }; use std::time::{Duration, Instant}; -use thiserror::Error; use typesense_codegen::apis::{self, configuration}; use crate::client::multi_search::MultiSearch; @@ -141,28 +142,6 @@ impl Default for MultiNodeConfiguration { } } -/// The primary error type for the Typesense client. -#[derive(Debug, Error)] -pub enum Error -where - E: std::fmt::Debug + 'static, - apis::Error: std::error::Error + 'static, -{ - /// Indicates that all configured nodes failed to process a request. - #[error("All API nodes failed to respond. Last error: {source}")] - AllNodesFailed { - /// The last underlying API or network error received from a node before giving up. - #[source] - source: apis::Error, - }, - - // Any middleware error will be wrapped in the Api variant below. - /// An API-level error returned by the Typesense server (e.g., 503 Service Unavailable) - /// or a network-level error from the underlying HTTP client (e.g. connection refused). - #[error("A single node failed with an API or network error")] - Api(#[from] apis::Error), -} - /// The main entry point for all interactions with the Typesense API. /// /// The client manages connections to multiple nodes and provides access to different @@ -332,7 +311,7 @@ impl Client { } // If the loop finishes, all nodes have failed. - Err(Error::AllNodesFailed { + Err(crate::Error::AllNodesFailed { source: last_api_error .expect("No nodes were available to try, or all errors were non-retriable."), }) @@ -340,12 +319,9 @@ impl Client { /// Provides access to the collection aliases-related API endpoints. 
/// /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -365,12 +341,9 @@ impl Client { /// Provides access to a specific collection alias's-related API endpoints. /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -390,12 +363,9 @@ impl Client { /// Provides access to API endpoints for managing collections like `create()` and `retrieve()`. /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -409,43 +379,95 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn collections(&self) -> collections::Collections<'_> { - collections::Collections::new(self) + pub fn collections(&self) -> Collections<'_> { + Collections::new(self) } - /// Provides access to API endpoints of a specific collection. - /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// Provides access to API endpoints for a specific collection. + /// + /// This method returns a `Collection` handle, which is generic over the type of document + /// stored in that collection. + /// + /// # Type Parameters + /// * `T` - The type of the documents in the collection. It must be serializable and deserializable. + /// **This defaults to `serde_json::Value`**, allowing you to perform collection-level + /// operations (like delete, update, retrieve schema) without specifying a type, + /// or to work with schemaless documents. + /// + /// # Arguments + /// * `collection_name` - The name of the collection to interact with. + /// + /// # Example: Working with a strongly-typed collection + /// + /// When you want to retrieve or search for documents and have them automatically + /// deserialized into your own structs. 
+ /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; + /// # use serde::{Serialize, Deserialize}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { + /// # #[derive(Serialize, Deserialize, Debug)] + /// # struct Book { id: String, title: String } + /// # async fn run() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// // Get a typed handle to the "books" collection + /// let books_collection = client.collection_of::("books"); + /// + /// // Retrieve a single book, it returns `Result` + /// let book = books_collection.document("123").retrieve().await?; + /// println!("Retrieved book: {:?}", book); + /// # + /// # Ok(()) + /// # } + /// ``` + pub fn collection_of<'a, T>(&'a self, collection_name: &'a str) -> Collection<'a, T> + where + T: DeserializeOwned + Serialize + Send + Sync, + { + Collection::new(self, collection_name) + } + + /// Provides access to API endpoints for a specific collection using schemaless `serde_json::Value` documents. + /// + /// This is the simplest way to interact with a collection when you do not need strong typing. + /// It is a convenient shorthand for `client.collection_of::("...")`. + /// + /// The returned handle can be used for both document operations (which will return `serde_json::Value`) + /// and collection-level operations (like `.delete()` or `.retrieve()`). + /// + /// # Arguments + /// * `collection_name` - The name of the collection to interact with. + /// + /// # Example + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; + /// # use reqwest::Url; + /// # async fn run() -> Result<(), Box> { /// # let config = MultiNodeConfiguration { /// # nodes: vec![Url::parse("http://localhost:8108")?], /// # api_key: "xyz".to_string(), /// # ..Default::default() /// # }; /// # let client = Client::new(config)?; - /// let my_collection = client.collection("products").retrieve().await.unwrap(); + /// let products_collection = client.collection("products"); + /// # /// # Ok(()) /// # } /// ``` - pub fn collection<'a>(&'a self, collection_name: &'a str) -> Collection<'a> { + pub fn collection<'a>(&'a self, collection_name: &'a str) -> Collection<'a, serde_json::Value> { Collection::new(self, collection_name) } /// Provides access to the analytics-related API endpoints. /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -465,12 +487,9 @@ impl Client { /// Returns a `Conversations` instance for managing conversation models. 
/// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -490,12 +509,9 @@ impl Client { /// Provides access to top-level, non-namespaced API endpoints like `health` and `debug`. /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -516,12 +532,9 @@ impl Client { /// Provides access to endpoints for managing the collection of API keys. /// /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration, models}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -551,12 +564,9 @@ impl Client { /// * `key_id` - The ID of the key to manage. /// /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -577,12 +587,9 @@ impl Client { /// Provides access to endpoints for managing all of your presets. /// /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -606,12 +613,9 @@ impl Client { /// * `preset_id` - The ID of the preset to manage. /// /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -634,10 +638,8 @@ impl Client { /// # Example /// /// ``` - /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// # use typesense::{Client, MultiNodeConfiguration, models}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -660,11 +662,9 @@ impl Client { /// Provides access to endpoints for managing the collection of stopwords sets. 
/// /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration, models}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -688,11 +688,9 @@ impl Client { /// * `set_id` - The ID of the stopwords set to manage. /// /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration, models}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { @@ -713,12 +711,9 @@ impl Client { /// Provides access to the multi search endpoint. /// /// # Example - /// ```ignore - /// # use typesense::client::{Client, MultiNodeConfiguration}; - /// # use typesense_codegen::models; + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration, models}; /// # use reqwest::Url; - /// # use reqwest_retry::policies::ExponentialBackoff; - /// # use std::time::Duration; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { diff --git a/typesense/src/client/multi_search.rs b/typesense/src/client/multi_search.rs index 54e3f1a..8f6920a 100644 --- a/typesense/src/client/multi_search.rs +++ b/typesense/src/client/multi_search.rs @@ -2,7 +2,7 @@ //! //! A `MultiSearch` instance is created via the main `Client::multi_search()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{ diff --git a/typesense/src/client/operations.rs b/typesense/src/client/operations.rs index 152022c..60a7721 100644 --- a/typesense/src/client/operations.rs +++ b/typesense/src/client/operations.rs @@ -2,7 +2,7 @@ //! //! An `Operations` instance is created via the main `Client::operations()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{ diff --git a/typesense/src/client/preset.rs b/typesense/src/client/preset.rs index 652f632..659b7d3 100644 --- a/typesense/src/client/preset.rs +++ b/typesense/src/client/preset.rs @@ -2,7 +2,7 @@ //! //! A `Preset` instance is created via the main `Client::preset(id)` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, presets_api}, diff --git a/typesense/src/client/presets.rs b/typesense/src/client/presets.rs index ee82ed0..29f1966 100644 --- a/typesense/src/client/presets.rs +++ b/typesense/src/client/presets.rs @@ -4,7 +4,7 @@ //! //! A `Presets` instance is created via the main `Client::presets()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, presets_api}, diff --git a/typesense/src/client/stopword.rs b/typesense/src/client/stopword.rs index 6917238..86800a8 100644 --- a/typesense/src/client/stopword.rs +++ b/typesense/src/client/stopword.rs @@ -2,7 +2,7 @@ //! //! An instance of `Stopword` is created via the `Client::stopword()` method. 
-use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, stopwords_api}, diff --git a/typesense/src/client/stopwords.rs b/typesense/src/client/stopwords.rs index dbbb9b0..d9374e6 100644 --- a/typesense/src/client/stopwords.rs +++ b/typesense/src/client/stopwords.rs @@ -2,7 +2,7 @@ //! //! A `Stopwords` instance is created via the main `Client::stopwords()` method. -use super::{Client, Error}; +use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, stopwords_api}, diff --git a/typesense/src/error.rs b/typesense/src/error.rs new file mode 100644 index 0000000..dd54c2c --- /dev/null +++ b/typesense/src/error.rs @@ -0,0 +1,54 @@ +use thiserror::Error; +pub use typesense_codegen::apis::Error as ApiError; + +/// The primary error type for the Typesense client. +/// +/// This enum encapsulates all possible failures, from network issues to API errors +/// returned by the Typesense server, to client-side data handling problems. +/// +/// The generic parameter `E` represents the specific error type associated with a +/// particular API operation (e.g., `SearchCollectionError`, `GetDocumentError`). +#[derive(Debug, Error)] +pub enum Error +where + E: std::fmt::Debug + 'static, + ApiError: std::error::Error + 'static, +{ + /// Occurs when an operation fails against all configured Typesense nodes. + /// + /// This error is only returned when using a client configured with multiple nodes. + /// It signifies that the client attempted the operation against each node in turn, + /// and every attempt failed. The user should check the health and connectivity + /// of all their Typesense nodes. + /// + /// The `source` field contains the error from the *last* node that was attempted. + #[error("All configured Typesense nodes failed to respond. Last error: {source}")] + AllNodesFailed { + /// The underlying API or network error from the last node attempt. + #[source] + source: ApiError, + }, + + /// Wraps an error returned by the Typesense API or the underlying network stack. + /// + /// This can be due to: + /// - A server-side issue (e.g., HTTP 5xx errors). + /// - A client-side mistake (e.g., HTTP 4xx errors like `404 Not Found` or `401 Unauthorized`). + /// - A network connectivity problem (e.g., connection refused, timeout, DNS failure). + /// + /// You should inspect the wrapped error to get specific details about the HTTP status code and response body. + #[error("An API or network error occurred: {0}")] + Api(#[from] ApiError), + + /// Occurs when the JSON response from Typesense cannot be deserialized into the target Rust struct. + /// + /// This typically signifies a mismatch between the data in your Typesense collection + /// and the fields or data types defined in your Rust struct (`T`). + /// + /// **To debug this, check for:** + /// - A field that exists in Typesense but not in your struct (unless your struct ignores unknown fields). + /// - A field in your struct that doesn't exist in the Typesense document and is not wrapped in an `Option`. + /// - A type mismatch (e.g., a Typesense `string` field that you are trying to deserialize into a `u64`). + #[error("Failed to deserialize the API response into the target struct: {0}")] + Deserialization(#[from] serde_json::Error), +} diff --git a/typesense/src/lib.rs b/typesense/src/lib.rs index b0a85fa..57cc873 100644 --- a/typesense/src/lib.rs +++ b/typesense/src/lib.rs @@ -43,13 +43,17 @@ //! } //! 
``` -pub mod client; +mod client; +mod error; + pub mod collection_schema; pub mod document; pub mod field; -pub mod keys; +// pub mod keys; +pub mod models; -pub use typesense_codegen::*; +pub use client::{Client, MultiNodeConfiguration}; +pub use error::{ApiError, Error}; #[cfg(feature = "typesense_derive")] #[doc(hidden)] diff --git a/typesense/src/models/mod.rs b/typesense/src/models/mod.rs new file mode 100644 index 0000000..c200ccd --- /dev/null +++ b/typesense/src/models/mod.rs @@ -0,0 +1,111 @@ +//! # Typesense generic models +pub mod search; +pub use search::*; +pub use typesense_codegen::models::{ + AnalyticsEventCreateResponse, AnalyticsEventCreateSchema, AnalyticsRuleDeleteResponse, + AnalyticsRuleParameters, AnalyticsRuleParametersDestination, AnalyticsRuleParametersSource, + AnalyticsRuleParametersSourceEventsInner, AnalyticsRuleSchema, AnalyticsRuleUpsertSchema, + AnalyticsRulesRetrieveSchema, ApiKey, ApiKeyDeleteResponse, ApiKeySchema, ApiKeysResponse, + ApiResponse, ApiStatsResponse, CollectionAlias, CollectionAliasSchema, + CollectionAliasesResponse, CollectionResponse, CollectionSchema, CollectionUpdateSchema, + ConversationModelCreateSchema, ConversationModelSchema, ConversationModelUpdateSchema, + Debug200Response, DeleteDocuments200Response, DeleteDocumentsParameters, + DeleteStopwordsSet200Response, DirtyValues, DocumentIndexParameters, DropTokensMode, + ErrorResponse, ExportDocumentsParameters, FacetCounts, FacetCountsCountsInner, + FacetCountsStats, Field, FieldEmbed, FieldEmbedModelConfig, HealthStatus, + ImportDocumentsParameters, IndexAction, ListStemmingDictionaries200Response, + MultiSearchCollectionParameters, MultiSearchParameters, MultiSearchResult, + MultiSearchResultItem, MultiSearchSearchesParameter, NlSearchModelBase, + NlSearchModelCreateSchema, NlSearchModelDeleteSchema, NlSearchModelSchema, PresetDeleteSchema, + PresetSchema, PresetUpsertSchema, PresetUpsertSchemaValue, PresetsRetrieveSchema, + SchemaChangeStatus, ScopedKeyParameters, SearchGroupedHit, SearchHighlight, SearchOverride, + SearchOverrideDeleteResponse, SearchOverrideExclude, SearchOverrideInclude, SearchOverrideRule, + SearchOverrideSchema, SearchOverridesResponse, SearchParameters, SearchResultConversation, + SearchResultHitTextMatchInfo, SearchResultRequestParams, SearchResultRequestParamsVoiceQuery, + SearchSynonym, SearchSynonymDeleteResponse, SearchSynonymSchema, SearchSynonymsResponse, + SnapshotParameters, StemmingDictionary, StemmingDictionaryWordsInner, + StopwordsSetRetrieveSchema, StopwordsSetSchema, StopwordsSetUpsertSchema, + StopwordsSetsRetrieveAllSchema, SuccessStatus, UpdateDocuments200Response, + UpdateDocumentsParameters, VoiceQueryModelCollectionConfig, +}; +// Only re-export the sub modules that have enums inside them. 
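With `Error` and `ApiError` now re-exported from the crate root, downstream code can match on the failure modes introduced in `error.rs` above. A sketch of such a match; only the `status` field of the response payload is relied on here (it is the one exercised elsewhere in this series), everything else is rendered through `Display`/`Debug`.

```rust
use typesense::{ApiError, Error};

// Logs a client error, distinguishing the three failure modes.
fn log_failure<E>(err: &Error<E>)
where
    E: std::fmt::Debug + 'static,
    ApiError<E>: std::error::Error + 'static,
{
    match err {
        Error::AllNodesFailed { source } => {
            eprintln!("every configured node failed; last error: {source}")
        }
        Error::Api(ApiError::ResponseError(resp)) => {
            eprintln!("Typesense returned HTTP {}", resp.status)
        }
        Error::Api(other) => eprintln!("transport or client-side API error: {other}"),
        Error::Deserialization(e) => eprintln!("response did not match the target struct: {e}"),
    }
}
```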
+pub use typesense_codegen::models::{ + analytics_rule_schema::Type as AnalyticsRulesType, // analytics_rule_upsert_schema, + api_key, + api_key_delete_response, + api_key_schema, + api_keys_response, + api_response, + api_stats_response, + collection_alias, + collection_alias_schema, + collection_aliases_response, + collection_response, + collection_schema, + collection_update_schema, + conversation_model_create_schema, + conversation_model_schema, + conversation_model_update_schema, + debug_200_response, + delete_documents_200_response, + delete_documents_parameters, + delete_stopwords_set_200_response, + document_index_parameters, + drop_tokens_mode, + error_response, + export_documents_parameters, + facet_counts, + facet_counts_counts_inner, + facet_counts_stats, + field, + field_embed, + field_embed_model_config, + health_status, + import_documents_parameters, + index_action, + list_stemming_dictionaries_200_response, + multi_search_collection_parameters, + multi_search_parameters, + multi_search_result, + multi_search_result_item, + multi_search_searches_parameter, + nl_search_model_base, + nl_search_model_create_schema, + nl_search_model_delete_schema, + nl_search_model_schema, + preset_delete_schema, + preset_schema, + preset_upsert_schema, + preset_upsert_schema_value, + presets_retrieve_schema, + schema_change_status, + scoped_key_parameters, + search_grouped_hit, + search_highlight, + search_override, + search_override_delete_response, + search_override_exclude, + search_override_include, + search_override_rule, + search_override_schema, + search_overrides_response, + search_parameters, + search_result_conversation, + search_result_hit_text_match_info, + search_result_request_params, + search_result_request_params_voice_query, + search_synonym, + search_synonym_delete_response, + search_synonym_schema, + search_synonyms_response, + snapshot_parameters, + stemming_dictionary, + stemming_dictionary_words_inner, + stopwords_set_retrieve_schema, + stopwords_set_schema, + stopwords_set_upsert_schema, + stopwords_sets_retrieve_all_schema, + success_status, + update_documents_200_response, + update_documents_parameters, +}; diff --git a/typesense/src/models/search.rs b/typesense/src/models/search.rs new file mode 100644 index 0000000..e71d16b --- /dev/null +++ b/typesense/src/models/search.rs @@ -0,0 +1,144 @@ +//! Contains the generic `SearchResult` and `SearchResultHit` structs + +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use typesense_codegen::models as raw_models; + +/// Represents a single search result hit, with the document deserialized into a strongly-typed struct `T`. +/// +/// This struct is generic over the document type `T`, which must be deserializable from JSON. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +// Add this line to help the derive macro with the generic bound. +#[serde(bound(serialize = "T: Serialize", deserialize = "T: DeserializeOwned"))] +pub struct SearchResultHit { + /// (Deprecated) Contains highlighted portions of the search fields + #[serde(rename = "highlights", skip_serializing_if = "Option::is_none")] + pub highlights: Option>, + + /// Highlighted version of the matching document + #[serde(rename = "highlight", skip_serializing_if = "Option::is_none")] + pub highlight: Option>, + + /// The full document that was matched, deserialized into type `T`. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub document: Option, + + /// The score of the text match. 
+ #[serde(rename = "text_match", skip_serializing_if = "Option::is_none")] + pub text_match: Option, + + /// Detailed information about the text match. + #[serde(rename = "text_match_info", skip_serializing_if = "Option::is_none")] + pub text_match_info: Option>, + + /// Can be any key-value pair + #[serde( + rename = "geo_distance_meters", + skip_serializing_if = "Option::is_none" + )] + pub geo_distance_meters: Option>, + + /// Distance between the query vector and matching document's vector value + #[serde(rename = "vector_distance", skip_serializing_if = "Option::is_none")] + pub vector_distance: Option, +} + +/// Represents the full response from a Typesense search query, containing strongly-typed hits. +/// +/// This struct is generic over the document type `T`. It is the return type of the +/// `documents().search()` method. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +// Add this line to help the derive macro with the generic bound. +#[serde(bound(serialize = "T: Serialize", deserialize = "T: DeserializeOwned"))] +pub struct SearchResult { + /// The search result hits, with documents deserialized into type `T`. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hits: Option>>, + + /// The number of documents found. + #[serde(skip_serializing_if = "Option::is_none")] + pub found: Option, + + /// The number of documents that matched the search query. + #[serde(rename = "found_docs", skip_serializing_if = "Option::is_none")] + pub found_docs: Option, + + /// The total number of documents in the collection. + #[serde(skip_serializing_if = "Option::is_none")] + pub out_of: Option, + + /// The search result page number. + #[serde(skip_serializing_if = "Option::is_none")] + pub page: Option, + + /// The number of milliseconds the search took. + #[serde(skip_serializing_if = "Option::is_none")] + pub search_time_ms: Option, + + /// Counts of values for each facet field. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub facet_counts: Option>, + + /// Results grouped by a field. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub grouped_hits: Option>, + + /// Whether the search was cut off. + #[serde(skip_serializing_if = "Option::is_none")] + pub search_cutoff: Option, + + /// The request parameters that were used for this search. + #[serde(skip_serializing_if = "Option::is_none")] + pub request_params: Option>, + + /// Conversation object for conversational search. + #[serde(skip_serializing_if = "Option::is_none")] + pub conversation: Option>, +} + +impl SearchResult +where + T: DeserializeOwned, +{ + /// Transforms a raw, non-generic `SearchResult` from the API into a strongly-typed `SearchResult`. 
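The conversion implemented just below ultimately relies on `serde_json::from_value` for each hit's `document` payload. A self-contained illustration of that single step, with a throwaway `Book` struct standing in for the user's document type:

```rust
use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize, Debug)]
struct Book {
    id: String,
    title: String,
}

fn main() -> Result<(), serde_json::Error> {
    // Each raw hit carries its document as a `serde_json::Value`;
    // the typed wrapper simply runs it through `from_value::<T>()`.
    let raw_document = json!({ "id": "1", "title": "Dune" });
    let book: Book = serde_json::from_value(raw_document)?;
    println!("{book:?}");
    Ok(())
}
```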
+ pub(crate) fn from_raw( + raw_result: raw_models::SearchResult, + ) -> Result { + let typed_hits = match raw_result.hits { + Some(raw_hits) => { + let mut hits = Vec::with_capacity(raw_hits.len()); + for raw_hit in raw_hits { + let document: Option = match raw_hit.document { + Some(doc_value) => Some(serde_json::from_value(doc_value)?), + None => None, + }; + + hits.push(SearchResultHit { + document, + highlights: raw_hit.highlights, + highlight: raw_hit.highlight, + text_match: raw_hit.text_match, + text_match_info: raw_hit.text_match_info, + geo_distance_meters: raw_hit.geo_distance_meters, + vector_distance: raw_hit.vector_distance, + }); + } + Some(hits) + } + None => None, + }; + + Ok(SearchResult { + found_docs: raw_result.found_docs, + hits: typed_hits, + facet_counts: raw_result.facet_counts, + found: raw_result.found, + out_of: raw_result.out_of, + page: raw_result.page, + search_time_ms: raw_result.search_time_ms, + grouped_hits: raw_result.grouped_hits, + search_cutoff: raw_result.search_cutoff, + request_params: raw_result.request_params, + conversation: raw_result.conversation, + }) + } +} diff --git a/typesense/tests/client/aliases_test.rs b/typesense/tests/client/aliases_test.rs index 4a3c6b9..edfbae3 100644 --- a/typesense/tests/client/aliases_test.rs +++ b/typesense/tests/client/aliases_test.rs @@ -1,4 +1,4 @@ -use typesense_codegen::models::{CollectionAliasSchema, CollectionSchema, Field}; +use typesense::models::{CollectionAliasSchema, CollectionSchema, Field}; use super::{get_client, new_id}; diff --git a/typesense/tests/client/analytics_test.rs b/typesense/tests/client/analytics_test.rs index 4d14fdd..1854639 100644 --- a/typesense/tests/client/analytics_test.rs +++ b/typesense/tests/client/analytics_test.rs @@ -1,11 +1,10 @@ use super::{get_client, new_id}; use serde_json::json; -use typesense::models::analytics_rule_schema::Type::Counter; use typesense::models::{ self, AnalyticsEventCreateSchema, AnalyticsRuleParametersDestination, AnalyticsRuleParametersSource, AnalyticsRuleParametersSourceEventsInner, AnalyticsRuleSchema, + AnalyticsRulesType::Counter, CollectionSchema, Field, }; -use typesense_codegen::models::{CollectionSchema, Field}; #[tokio::test] async fn test_analytics_rules_and_events_lifecycle() { diff --git a/typesense/tests/client/client_test.rs b/typesense/tests/client/client_test.rs index 626d22c..1cfae11 100644 --- a/typesense/tests/client/client_test.rs +++ b/typesense/tests/client/client_test.rs @@ -1,8 +1,8 @@ use reqwest::Url; use reqwest_retry::policies::ExponentialBackoff; use std::time::Duration; -use typesense::client::*; use typesense::models::CollectionResponse; +use typesense::*; use wiremock::matchers::{header, method, path}; use wiremock::{Mock, MockServer, ResponseTemplate}; diff --git a/typesense/tests/client/collections_test.rs b/typesense/tests/client/collections_test.rs index f327233..e9e374c 100644 --- a/typesense/tests/client/collections_test.rs +++ b/typesense/tests/client/collections_test.rs @@ -1,4 +1,4 @@ -use typesense_codegen::models::{CollectionSchema, CollectionUpdateSchema, Field}; +use typesense::models::{CollectionSchema, CollectionUpdateSchema, Field}; use super::{get_client, new_id}; diff --git a/typesense/tests/client/conversation_models_test.rs b/typesense/tests/client/conversation_models_test.rs index b2d63af..5e58fca 100644 --- a/typesense/tests/client/conversation_models_test.rs +++ b/typesense/tests/client/conversation_models_test.rs @@ -2,10 +2,11 @@ use std::time::Duration; use 
reqwest_retry::policies::ExponentialBackoff; use typesense::{ - client::{Error as TypesenseError, MultiNodeConfiguration}, - models::ConversationModelUpdateSchema, + models::{ + CollectionSchema, ConversationModelCreateSchema, ConversationModelUpdateSchema, Field, + }, + Error as TypesenseError, MultiNodeConfiguration, }; -use typesense_codegen::models::{CollectionSchema, ConversationModelCreateSchema, Field}; use super::{get_client, new_id}; @@ -73,7 +74,7 @@ async fn test_create_model_with_invalid_key_fails_as_expected() { ); match create_result.err() { Some(TypesenseError::Api(response_content)) => match response_content { - typesense::apis::Error::ResponseError(api_error) => { + typesense::ApiError::ResponseError(api_error) => { assert_eq!( api_error.status.as_u16(), 400, @@ -108,7 +109,7 @@ async fn test_create_model_with_invalid_key_fails_as_expected() { ); } -use typesense::client::Client; +use typesense::Client; use wiremock::{ matchers::{body_json, method, path}, Mock, MockServer, ResponseTemplate, diff --git a/typesense/tests/client/documents_test.rs b/typesense/tests/client/documents_test.rs index 1f9c57e..80673bb 100644 --- a/typesense/tests/client/documents_test.rs +++ b/typesense/tests/client/documents_test.rs @@ -1,8 +1,9 @@ +use serde::{Deserialize, Serialize}; use serde_json::json; -use typesense::models::IndexAction; -use typesense_codegen::models::{ - CollectionSchema, DeleteDocumentsParameters, ExportDocumentsParameters, Field, - ImportDocumentsParameters, SearchParameters, UpdateDocumentsParameters, +use typesense::models::{ + CollectionSchema, DeleteDocumentsParameters, DirtyValues, DocumentIndexParameters, + ExportDocumentsParameters, Field, ImportDocumentsParameters, IndexAction, SearchParameters, + UpdateDocumentsParameters, }; use super::{get_client, new_id}; @@ -59,11 +60,11 @@ async fn test_document_lifecycle() { let documents_client = collection_client.documents(); // --- 2. Create a document (via `documents().create()`) --- - let create_res = documents_client.create(book_1.clone()).await; + let create_res = documents_client.create(&book_1, None).await; assert!(create_res.is_ok(), "Failed to create document 1"); // --- 3. Upsert a document (via `documents().upsert()`) --- - let upsert_res = documents_client.upsert(book_2.clone()).await; + let upsert_res = documents_client.upsert(&book_2, None).await; assert!(upsert_res.is_ok(), "Failed to upsert document 2"); // --- 4. Retrieve a single document (via `document(id).retrieve()`) --- @@ -90,7 +91,7 @@ async fn test_document_lifecycle() { let update_res = client .collection(&collection_name) .document(book_1_id) - .update(partial_update) + .update(&partial_update, None) .await; assert!(update_res.is_ok(), "Failed to update document 1"); @@ -141,9 +142,6 @@ async fn test_document_lifecycle() { .await; assert!(import_res.is_ok(), "Bulk import failed"); - // Give Typesense a moment to index - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - // --- 11. Verify Import via Search --- let search_after_import_params = SearchParameters { q: Some("*".to_string()), @@ -167,9 +165,6 @@ async fn test_document_lifecycle() { // Should update Lord of the Rings (1954) and Foundation (1951) assert_eq!(bulk_update_res.unwrap().num_updated, 2); - // Give Typesense a moment to index - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - // --- 13. 
Export documents (via `documents().export()`) --- let export_params = ExportDocumentsParameters { filter_by: Some("author:\"Sci-Fi Pioneer\"".to_string()), @@ -195,11 +190,154 @@ async fn test_document_lifecycle() { assert!(bulk_delete_res.is_ok(), "Bulk delete failed"); // Only "Dune" (1965) should be deleted assert_eq!(bulk_delete_res.unwrap().num_deleted, 1); +} + +// --- TESTS FOR GENERIC FEATURES --- + +/// A strongly-typed representation of a book document. +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +struct Book { + id: String, + title: String, + author: String, + publication_year: i32, + #[serde(default, skip_serializing_if = "Option::is_none")] + in_stock: Option, +} + +#[tokio::test] +async fn test_generic_document_lifecycle() { + let client = get_client(); + let collection_name = new_id("generic_books"); + + // --- 1. Setup: Create a Collection matching the Book struct --- + let schema = CollectionSchema { + name: collection_name.clone(), + fields: vec![ + Field { + name: "title".to_string(), + r#type: "string".to_string(), + ..Default::default() + }, + Field { + name: "author".to_string(), + r#type: "string".to_string(), + facet: Some(true), + ..Default::default() + }, + Field { + name: "publication_year".to_string(), + r#type: "int32".to_string(), + ..Default::default() + }, + Field { + name: "in_stock".to_string(), + r#type: "bool".to_string(), + optional: Some(true), + ..Default::default() + }, + ], + ..Default::default() + }; - // --- 15. Teardown: Delete the collection --- - let delete_collection_result = client.collection(&collection_name).delete().await; + let create_collection_result = client.collections().create(schema).await; assert!( - delete_collection_result.is_ok(), - "Failed to delete collection" + create_collection_result.is_ok(), + "Failed to create collection for generic test" + ); + + // Use the strongly-typed collection client + let typed_collection = client.collection_of::(&collection_name); + + let book_1 = Book { + id: new_id("book_1"), + title: "Dune".to_string(), + author: "Frank Herbert".to_string(), + publication_year: 1965, + in_stock: Some(true), + }; + + let book_2 = Book { + id: new_id("book_2"), + title: "Foundation".to_string(), + author: "Isaac Asimov".to_string(), + publication_year: 1951, + in_stock: Some(false), + }; + + // --- 2. Create a document using a typed struct --- + let create_res = typed_collection.documents().create(&book_1, None).await; + assert!(create_res.is_ok(), "Failed to create typed document"); + // The created document should be returned and be equal to the input + assert_eq!(create_res.unwrap(), book_1); + + // --- 3. Upsert a document using a typed struct --- + let upsert_res = typed_collection.documents().upsert(&book_2, None).await; + assert!(upsert_res.is_ok(), "Failed to upsert typed document"); + assert_eq!(upsert_res.unwrap(), book_2); + + // --- 4. Retrieve a single document and deserialize into a struct --- + let retrieve_res = typed_collection.document(&book_1.id).retrieve().await; + assert!(retrieve_res.is_ok(), "Failed to retrieve typed document"); + assert_eq!(retrieve_res.unwrap(), book_1); + + // --- 5. 
Search for documents with strongly-typed results --- + let search_params = SearchParameters { + q: Some("dune".to_string()), + query_by: Some("title".to_string()), + ..Default::default() + }; + let search_res = typed_collection.documents().search(search_params).await; + assert!(search_res.is_ok(), "Typed search failed"); + let search_results = search_res.unwrap(); + + assert_eq!(search_results.found, Some(1)); + let hits = search_results.hits.expect("Search should have hits"); + assert_eq!(hits.len(), 1); + // The document within the hit should be the deserialized Book struct + let hit_doc = hits[0] + .document + .as_ref() + .expect("Hit should contain a document"); + assert_eq!(hit_doc, &book_1); + + // --- 6. Update a single document with a partial payload --- + #[derive(Serialize)] + struct BookUpdate { + publication_year: i32, + in_stock: bool, + } + let partial_update_struct = BookUpdate { + publication_year: 1966, + in_stock: false, + }; + let index_params = DocumentIndexParameters { + dirty_values: Some(DirtyValues::CoerceOrReject), + }; + let update_res = typed_collection + .document(&book_1.id) + .update(&partial_update_struct, Some(index_params)) + .await; + assert!(update_res.is_ok(), "Failed to update typed document"); + + // The returned document should be the full, updated Book struct + let updated_book = update_res.unwrap(); + assert_eq!(updated_book.publication_year, 1966); + assert_eq!(updated_book.in_stock, Some(false)); + assert_eq!(updated_book.title, book_1.title); // Other fields are preserved + + // --- 7. Delete a single document, receiving the typed struct back --- + let delete_res = typed_collection.document(&book_1.id).delete().await; + assert!(delete_res.is_ok(), "Failed to delete typed document"); + // The deleted document (in its state just before deletion) is returned + let deleted_book = delete_res.unwrap(); + assert_eq!(deleted_book.id, book_1.id); + assert_eq!(deleted_book.publication_year, 1966); // It was the updated version + + // --- 8. Verify single deletion --- + let retrieve_after_delete_res = typed_collection.document(&book_1.id).retrieve().await; + assert!( + retrieve_after_delete_res.is_err(), + "Typed document should not exist after deletion" ); } diff --git a/typesense/tests/client/keys_test.rs b/typesense/tests/client/keys_test.rs index 1e40569..7af025f 100644 --- a/typesense/tests/client/keys_test.rs +++ b/typesense/tests/client/keys_test.rs @@ -1,5 +1,5 @@ use super::get_client; -use typesense_codegen::models::ApiKeySchema; +use typesense::models::ApiKeySchema; #[tokio::test] async fn test_keys_lifecycle() { diff --git a/typesense/tests/client/mod.rs b/typesense/tests/client/mod.rs index 0e77bda..078819b 100644 --- a/typesense/tests/client/mod.rs +++ b/typesense/tests/client/mod.rs @@ -16,7 +16,7 @@ use reqwest::Url; use reqwest_retry::policies::ExponentialBackoff; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; -use typesense::client::{Client, MultiNodeConfiguration}; +use typesense::{Client, MultiNodeConfiguration}; /// Helper function to create a new client for all tests in this suite. 
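Outside of a test, the typed flow exercised above condenses to a handful of calls. Method names and signatures follow the test code; the collection name and `Book` fields here are illustrative only.

```rust
use serde::{Deserialize, Serialize};
use typesense::{models::SearchParameters, Client};

#[derive(Serialize, Deserialize, Debug)]
struct Book {
    id: String,
    title: String,
    author: String,
    publication_year: i32,
}

async fn typed_usage(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
    let books = client.collection_of::<Book>("generic_books");

    let dune = Book {
        id: "book_1".into(),
        title: "Dune".into(),
        author: "Frank Herbert".into(),
        publication_year: 1965,
    };
    // `create` returns the indexed document, already deserialized into `Book`.
    let created = books.documents().create(&dune, None).await?;

    // Search results come back as `SearchResult<Book>`, so each hit's
    // `document` field is a typed `Book` rather than raw JSON.
    let params = SearchParameters {
        q: Some("dune".into()),
        query_by: Some("title".into()),
        ..Default::default()
    };
    let result = books.documents().search(params).await?;
    println!("found {:?} hits for {:?}", result.found, created.title);
    Ok(())
}
```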
pub fn get_client() -> Client { diff --git a/typesense/tests/client/multi_search_test.rs b/typesense/tests/client/multi_search_test.rs index ce5e2b1..c3477e6 100644 --- a/typesense/tests/client/multi_search_test.rs +++ b/typesense/tests/client/multi_search_test.rs @@ -1,4 +1,4 @@ -use typesense_codegen::models::{ +use typesense::models::{ CollectionSchema, Field, ImportDocumentsParameters, MultiSearchCollectionParameters, MultiSearchParameters, MultiSearchSearchesParameter, }; @@ -6,7 +6,7 @@ use typesense_codegen::models::{ use super::{get_client, new_id}; async fn setup_multi_search_tests( - client: &typesense::client::Client, + client: &typesense::Client, products_collection_name: &str, brands_collection_name: &str, ) { diff --git a/typesense/tests/client/presets_test.rs b/typesense/tests/client/presets_test.rs index 39bc67f..ec4b492 100644 --- a/typesense/tests/client/presets_test.rs +++ b/typesense/tests/client/presets_test.rs @@ -1,4 +1,4 @@ -use typesense_codegen::models::{ +use typesense::models::{ PresetSchema, PresetUpsertSchema, PresetUpsertSchemaValue, SearchParameters, }; diff --git a/typesense/tests/client/search_overrides_test.rs b/typesense/tests/client/search_overrides_test.rs index e67376f..8b61a0b 100644 --- a/typesense/tests/client/search_overrides_test.rs +++ b/typesense/tests/client/search_overrides_test.rs @@ -1,4 +1,4 @@ -use typesense_codegen::models::{ +use typesense::models::{ CollectionSchema, Field, SearchOverrideInclude, SearchOverrideRule, SearchOverrideSchema, }; diff --git a/typesense/tests/client/stopwords_test.rs b/typesense/tests/client/stopwords_test.rs index f4d5580..da863f4 100644 --- a/typesense/tests/client/stopwords_test.rs +++ b/typesense/tests/client/stopwords_test.rs @@ -1,4 +1,4 @@ -use typesense_codegen::models::StopwordsSetUpsertSchema; +use typesense::models::StopwordsSetUpsertSchema; use super::{get_client, new_id}; diff --git a/typesense/tests/client/synonyms_test.rs b/typesense/tests/client/synonyms_test.rs index a6141d5..d021413 100644 --- a/typesense/tests/client/synonyms_test.rs +++ b/typesense/tests/client/synonyms_test.rs @@ -1,4 +1,4 @@ -use typesense_codegen::models::{CollectionSchema, Field, SearchSynonymSchema}; +use typesense::models::{CollectionSchema, Field, SearchSynonymSchema}; use super::{get_client, new_id}; From 8996804584008f9da70bd398cf1f58a793d2ba70 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Tue, 29 Jul 2025 21:59:48 +0700 Subject: [PATCH 07/21] rename file --- typesense/src/models/mod.rs | 4 ++-- typesense/src/models/{search.rs => search_result.rs} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename typesense/src/models/{search.rs => search_result.rs} (100%) diff --git a/typesense/src/models/mod.rs b/typesense/src/models/mod.rs index c200ccd..547d840 100644 --- a/typesense/src/models/mod.rs +++ b/typesense/src/models/mod.rs @@ -1,6 +1,6 @@ //! 
# Typesense generic models -pub mod search; -pub use search::*; +pub mod search_result; +pub use search_result::*; pub use typesense_codegen::models::{ AnalyticsEventCreateResponse, AnalyticsEventCreateSchema, AnalyticsRuleDeleteResponse, AnalyticsRuleParameters, AnalyticsRuleParametersDestination, AnalyticsRuleParametersSource, diff --git a/typesense/src/models/search.rs b/typesense/src/models/search_result.rs similarity index 100% rename from typesense/src/models/search.rs rename to typesense/src/models/search_result.rs From 579d4b269840eabc758d81b77227dd598308b6eb Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Wed, 30 Jul 2025 21:26:24 +0700 Subject: [PATCH 08/21] feat: generate scoped searchkey --- typesense/src/client/keys.rs | 34 ++++++-- typesense/src/keys.rs | 83 ------------------- typesense/src/models/mod.rs | 9 +- typesense/src/models/scoped_key_parameters.rs | 25 ++++++ typesense/tests/client/keys_test.rs | 41 ++++++++- 5 files changed, 100 insertions(+), 92 deletions(-) delete mode 100644 typesense/src/keys.rs create mode 100644 typesense/src/models/scoped_key_parameters.rs diff --git a/typesense/src/client/keys.rs b/typesense/src/client/keys.rs index bd448b4..18906c8 100644 --- a/typesense/src/client/keys.rs +++ b/typesense/src/client/keys.rs @@ -2,12 +2,15 @@ //! //! An `Keys` instance is created via the `Client::keys()` method. -use crate::{Client, Error}; -use std::sync::Arc; -use typesense_codegen::{ - apis::{configuration, keys_api}, - models, +use crate::{ + models::{self, ScopedKeyParameters}, + Client, Error, }; +use base64::{engine::general_purpose::STANDARD as Base64Engine, Engine}; +use hmac::{Hmac, Mac}; +use sha2::Sha256; +use std::sync::Arc; +use typesense_codegen::apis::{configuration, keys_api}; /// Provides methods for managing a collection of Typesense API keys. /// @@ -52,4 +55,25 @@ impl<'a> Keys<'a> { }) .await } + + /// Generate a scoped search API key that can have embedded search parameters in them. + /// + /// More info [here](https://typesense.org/docs/latest/api/api-keys.html#generate-scoped-search-key). + pub fn generate_scoped_search_key( + &self, + key: impl AsRef, + params: &ScopedKeyParameters, + ) -> anyhow::Result { + let params = serde_json::to_string(params)?; + + let mut mac = Hmac::::new_from_slice(key.as_ref().as_bytes())?; + mac.update(params.as_bytes()); + let result = mac.finalize(); + let digest = Base64Engine.encode(result.into_bytes()); + + let key_prefix = &key.as_ref()[0..4]; + let raw_scoped_key = format!("{}{}{}", digest, key_prefix, params); + + Ok(Base64Engine.encode(raw_scoped_key.as_bytes())) + } } diff --git a/typesense/src/keys.rs b/typesense/src/keys.rs deleted file mode 100644 index 14ece65..0000000 --- a/typesense/src/keys.rs +++ /dev/null @@ -1,83 +0,0 @@ -//! Module containing everything related to Keys API. -//! -//! More info [here](https://typesense.org/docs/0.20.0/api/api-keys.html). - -use base64::{engine::general_purpose::STANDARD as Base64Engine, Engine}; -use core::fmt; -use hmac::{Hmac, Mac}; -use serde::{Deserialize, Serialize}; -use sha2::Sha256; -use typesense_codegen::models::ScopedKeyParameters; - -/// Generate a scoped search API key that can have embedded search parameters in them. -/// -/// More info [here](https://typesense.org/docs/0.20.0/api/api-keys.html#generate-scoped-search-key). 
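The derivation used by the new `generate_scoped_search_key` method in the hunk above is plain HMAC-SHA256 plus Base64. A standalone sketch of the same steps; `parent_key` and `embedded_params_json` are placeholder inputs, and, like the client code, it assumes the parent key is at least four characters long.

```rust
use base64::{engine::general_purpose::STANDARD as Base64Engine, Engine};
use hmac::{Hmac, Mac};
use sha2::Sha256;

/// Standalone sketch of the scoped-key derivation.
/// `embedded_params_json` is the serialized `ScopedKeyParameters` payload.
fn derive_scoped_key(parent_key: &str, embedded_params_json: &str) -> String {
    let mut mac = Hmac::<Sha256>::new_from_slice(parent_key.as_bytes())
        .expect("HMAC accepts keys of any length");
    mac.update(embedded_params_json.as_bytes());
    let digest = Base64Engine.encode(mac.finalize().into_bytes());

    // base64( HMAC digest + first 4 chars of the parent key + params JSON )
    let raw_scoped_key = format!("{}{}{}", digest, &parent_key[0..4], embedded_params_json);
    Base64Engine.encode(raw_scoped_key.as_bytes())
}
```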
-pub async fn generate_scoped_search_key( - key: impl AsRef, - filter_by: impl Into, - expires_at: i64, -) -> anyhow::Result { - let generate_scoped_search_key = ScopedKeyParameters { - filter_by: Some(filter_by.into()), - expires_at: Some(expires_at), - }; - let params = serde_json::to_string(&generate_scoped_search_key)?; - - let mut mac = Hmac::::new_from_slice(key.as_ref().as_bytes())?; - mac.update(params.as_bytes()); - let result = mac.finalize(); - let digest = Base64Engine.encode(result.into_bytes()); - - let key_prefix = &key.as_ref()[0..4]; - let raw_scoped_key = format!("{}{}{}", digest, key_prefix, params); - - Ok(Base64Engine.encode(raw_scoped_key.as_bytes())) -} - -/// Enum over the possible list of Actions. -/// -/// More info [here](https://typesense.org/docs/0.25.2/api/api-keys.html#sample-actions). -#[derive(Serialize, Deserialize)] -pub enum Actions { - /// Allows only search requests. - #[serde(rename = "documents:search")] - DocumentsSearch, - - /// Allows fetching a single document. - #[serde(rename = "documents:get")] - DocumentsGet, - - /// Allow all kinds of collection related operations. - #[serde(rename = "documents:*")] - DocumentsAll, - - /// Allows a collection to be deleted. - #[serde(rename = "collections:delete")] - CollectionsDelete, - - /// Allows a collection to be created. - #[serde(rename = "collections:create")] - CollectionsCreate, - - /// Allow all kinds of collection related operations. - #[serde(rename = "collections:*")] - CollectionsAll, - - /// Allows all operations. - #[serde(rename = "*")] - All, -} - -impl fmt::Display for Actions { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::DocumentsAll => write!(f, "documents:*"), - Self::DocumentsSearch => write!(f, "documents:search"), - Self::DocumentsGet => write!(f, "documents:get"), - Self::CollectionsAll => write!(f, "collections:*"), - Self::CollectionsDelete => write!(f, "collections:delete"), - Self::CollectionsCreate => write!(f, "collections:create"), - Self::All => write!(f, "*"), - } - } -} diff --git a/typesense/src/models/mod.rs b/typesense/src/models/mod.rs index 547d840..9c13056 100644 --- a/typesense/src/models/mod.rs +++ b/typesense/src/models/mod.rs @@ -1,6 +1,10 @@ //! 
# Typesense generic models -pub mod search_result; +mod scoped_key_parameters; +mod search_result; + +pub use scoped_key_parameters::*; pub use search_result::*; + pub use typesense_codegen::models::{ AnalyticsEventCreateResponse, AnalyticsEventCreateSchema, AnalyticsRuleDeleteResponse, AnalyticsRuleParameters, AnalyticsRuleParametersDestination, AnalyticsRuleParametersSource, @@ -18,7 +22,7 @@ pub use typesense_codegen::models::{ MultiSearchResultItem, MultiSearchSearchesParameter, NlSearchModelBase, NlSearchModelCreateSchema, NlSearchModelDeleteSchema, NlSearchModelSchema, PresetDeleteSchema, PresetSchema, PresetUpsertSchema, PresetUpsertSchemaValue, PresetsRetrieveSchema, - SchemaChangeStatus, ScopedKeyParameters, SearchGroupedHit, SearchHighlight, SearchOverride, + SchemaChangeStatus, SearchGroupedHit, SearchHighlight, SearchOverride, SearchOverrideDeleteResponse, SearchOverrideExclude, SearchOverrideInclude, SearchOverrideRule, SearchOverrideSchema, SearchOverridesResponse, SearchParameters, SearchResultConversation, SearchResultHitTextMatchInfo, SearchResultRequestParams, SearchResultRequestParamsVoiceQuery, @@ -79,7 +83,6 @@ pub use typesense_codegen::models::{ preset_upsert_schema_value, presets_retrieve_schema, schema_change_status, - scoped_key_parameters, search_grouped_hit, search_highlight, search_override, diff --git a/typesense/src/models/scoped_key_parameters.rs b/typesense/src/models/scoped_key_parameters.rs new file mode 100644 index 0000000..b423b96 --- /dev/null +++ b/typesense/src/models/scoped_key_parameters.rs @@ -0,0 +1,25 @@ +use crate::models::SearchParameters; +use serde::Serialize; + +/// Defines the parameters for generating a scoped API key. +/// +/// A scoped key is a temporary, client-side key that has a specific set of +/// search restrictions and an optional expiration time embedded within it. It allows +/// you to delegate search permissions securely without exposing your main API key. +#[derive(Debug, Clone, Default, Serialize)] +pub struct ScopedKeyParameters { + /// The search parameters to embed in the key. These parameters will be + /// enforced for all searches made with the generated key. + /// For example, you can use `filter_by` to restrict searches to a subset of documents. + #[serde(flatten, skip_serializing_if = "Option::is_none")] + pub search_params: Option, + + /// The number of `multi_search` requests that can be performed using this key. + /// This is an optional parameter to further restrict the key's capabilities. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit_multi_searches: Option, + + /// The Unix timestamp (in seconds) after which the generated key will expire. + #[serde(skip_serializing_if = "Option::is_none")] + pub expires_at: Option, +} diff --git a/typesense/tests/client/keys_test.rs b/typesense/tests/client/keys_test.rs index 7af025f..e56ccc3 100644 --- a/typesense/tests/client/keys_test.rs +++ b/typesense/tests/client/keys_test.rs @@ -1,5 +1,5 @@ use super::get_client; -use typesense::models::ApiKeySchema; +use typesense::models::{ApiKeySchema, ScopedKeyParameters, SearchParameters}; #[tokio::test] async fn test_keys_lifecycle() { @@ -80,3 +80,42 @@ async fn test_keys_lifecycle() { "API key should not exist after deletion." ); } + +#[test] +fn test_generate_scoped_search_key_with_example_values() { + // The parent key with `documents:search` permissions. + let search_only_api_key = "RN23GFr1s6jQ9kgSNg2O7fYcAUXU7127"; + + // The parameters to be embedded in the new scoped key. 
+ let params = ScopedKeyParameters { + search_params: Some(SearchParameters { + filter_by: Some("company_id:124".to_string()), + ..Default::default() + }), + expires_at: Some(1906054106), + ..Default::default() + }; + + // The known correct output from the Typesense documentation. + let expected_scoped_key = "OW9DYWZGS1Q1RGdSbmo0S1QrOWxhbk9PL2kxbTU1eXA3bCthdmE5eXJKRT1STjIzeyJmaWx0ZXJfYnkiOiJjb21wYW55X2lkOjEyNCIsImV4cGlyZXNfYXQiOjE5MDYwNTQxMDZ9"; + + let client = get_client(); + + let generated_key_result = client + .keys() + .generate_scoped_search_key(search_only_api_key, ¶ms); + + // First, ensure the function returned an Ok result. + assert!( + generated_key_result.is_ok(), + "Function returned an error: {:?}", + generated_key_result.err() + ); + + // Unwrap the result and compare it with the expected output. + let generated_key = generated_key_result.unwrap(); + assert_eq!( + generated_key, expected_scoped_key, + "The generated key does not match the expected key." + ); +} From 5a84fb3822a0bfa429791bed2e634c1383d481a4 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Tue, 5 Aug 2025 21:42:15 +0700 Subject: [PATCH 09/21] feat: helper method to parse federated search results; added support for union search --- typesense/src/client/mod.rs | 2 +- typesense/src/client/multi_search.rs | 440 ++++++++++++++++---- typesense/src/error.rs | 53 +++ typesense/src/lib.rs | 5 +- typesense/src/models/mod.rs | 26 +- typesense/src/models/multi_search.rs | 23 + typesense/src/models/search_result.rs | 109 ++++- typesense/src/prelude.rs | 23 + typesense/tests/client/multi_search_test.rs | 282 +++++++++++-- typesense_codegen/src/apis/documents_api.rs | 3 +- 10 files changed, 818 insertions(+), 148 deletions(-) create mode 100644 typesense/src/models/multi_search.rs create mode 100644 typesense/src/prelude.rs diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index 4eda539..afc91f7 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -733,7 +733,7 @@ impl Client { /// # ..Default::default() /// # }; /// # let common_params = models::MultiSearchParameters::default(); - /// let results = client.multi_search().perform(search_requests, common_params).await.unwrap(); + /// let results = client.multi_search().perform(&search_requests, &common_params).await.unwrap(); /// # Ok(()) /// # } /// ``` diff --git a/typesense/src/client/multi_search.rs b/typesense/src/client/multi_search.rs index 8f6920a..37386db 100644 --- a/typesense/src/client/multi_search.rs +++ b/typesense/src/client/multi_search.rs @@ -2,16 +2,62 @@ //! //! A `MultiSearch` instance is created via the main `Client::multi_search()` method. 
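A side note on the scoped-key test above: `expires_at` is a Unix timestamp in seconds, and the hard-coded `1906054106` is simply a date well in the future. To derive one at runtime, something like the following works; convert the result to whatever integer type the field expects.

```rust
use std::time::{SystemTime, UNIX_EPOCH};

fn main() {
    // A Unix timestamp one hour from now, suitable for `expires_at`.
    let expires_at = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is before the Unix epoch")
        .as_secs()
        + 3_600;
    println!("expires_at = {expires_at}");
}
```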
-use crate::{Client, Error}; +use crate::{ + models::SearchResult, Client, Error, MultiSearchParseError, MultiSearchResultExt, + MultiSearchSearchesParameter, +}; +use serde::de::DeserializeOwned; use std::sync::Arc; use typesense_codegen::{ apis::{ configuration::Configuration, documents_api::{self, MultiSearchParams}, }, - models, // The generated model structs + models as raw_models, }; +fn multi_search_item_to_search_result( + item: &raw_models::MultiSearchResultItem, +) -> raw_models::SearchResult { + raw_models::SearchResult { + hits: item.hits.clone(), + facet_counts: item.facet_counts.clone(), + grouped_hits: item.grouped_hits.clone(), + found: item.found, + found_docs: item.found_docs, + out_of: item.out_of, + page: item.page, + search_time_ms: item.search_time_ms, + search_cutoff: item.search_cutoff, + request_params: item.request_params.clone(), + conversation: item.conversation.clone(), + } +} + +impl MultiSearchResultExt for raw_models::MultiSearchResult { + fn parse_at( + &self, + index: usize, + ) -> Result, MultiSearchParseError> { + let raw_item = self + .results + .get(index) + .ok_or(MultiSearchParseError::IndexOutOfBounds(index))?; + + if let Some(error_msg) = &raw_item.error { + return Err(MultiSearchParseError::ApiError { + index, + message: error_msg.clone(), + }); + } + + let raw_search_result = multi_search_item_to_search_result(raw_item); + // Map the serde error into our new, more specific error type. + SearchResult::::from_raw(raw_search_result) + .map_err(|source| MultiSearchParseError::Deserialization { index, source }) + } +} + /// Provides methods for managing Typesense API keys. /// /// This struct is created by calling `client.keys()`. @@ -25,100 +71,320 @@ impl<'a> MultiSearch<'a> { Self { client } } - /// Make multiple search requests in a single HTTP request to avoid round-trip network latencies. + /// Performs a **federated** multi-search operation, returning a list of search results. /// - /// You can use it in two different modes: - - /// - Federated search: each search request in the multi-search payload returns results as independently. - /// The results vector in the `multi_search` response is guaranteed to be in the same order as the queries you send in the `searches` vector in your request. - /// - Union search: the response of each search request is merged into a single unified order. + /// This function allows you to send multiple search queries in a single HTTP request, which is + /// efficient for reducing network latency. It is specifically designed for federated searches, + /// where each query in the request runs independently and returns its own corresponding result. + /// + /// The returned `MultiSearchResult` contains a `results` vector where each item maps to a + /// query in the request, in the exact same order. To process these results in a type-safe + /// way, you can use the `MultiSearchResultExt::parse_at` helper method. + /// + /// This is the default multi-search behavior in Typesense. For more details, see the + /// [official Typesense API documentation on federated search](https://typesense.org/docs/latest/api/federated-multi-search.html#federated-search). + /// + /// For **union** searches that merge all hits into a single ranked list, use the + /// `perform_union` method instead. /// + /// # Example + /// + /// This example demonstrates a federated search across two different collections. 
+ /// + /// ```no_run + /// # use typesense::{Client, MultiNodeConfiguration, SearchResult, models, prelude::*}; + /// # use reqwest::Url; + /// # use serde::Deserialize; + /// # + /// # // Define the structs for your documents for typed parsing. + /// # #[derive(Deserialize, Debug)] + /// # struct Product { id: String, name: String } + /// # #[derive(Deserialize, Debug)] + /// # struct Brand { id: String, company_name: String } + /// # + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # let config = MultiNodeConfiguration { + /// # nodes: vec![Url::parse("http://localhost:8108")?], + /// # api_key: "xyz".to_string(), + /// # ..Default::default() + /// # }; + /// # let client = Client::new(config)?; + /// // Define the individual search queries for different collections. + /// let search_requests = models::MultiSearchSearchesParameter { + /// searches: vec![ + /// // Search #0 targets the 'products' collection + /// models::MultiSearchCollectionParameters { + /// collection: Some("products".to_string()), + /// q: Some("shoe".to_string()), + /// query_by: Some("name".to_string()), + /// ..Default::default() + /// }, + /// // Search #1 targets the 'brands' collection + /// models::MultiSearchCollectionParameters { + /// collection: Some("brands".to_string()), + /// q: Some("nike".to_string()), + /// query_by: Some("company_name".to_string()), + /// ..Default::default() + /// }, + /// ], + /// ..Default::default() + /// }; + /// + /// // Define parameters that will apply to all searches. + /// let common_params = models::MultiSearchParameters::default(); + /// + /// // Perform the federated multi-search. + /// let raw_response = client + /// .multi_search() + /// .perform(&search_requests, &common_params) + /// .await?; + /// + /// // The raw response contains a vector of results. + /// assert_eq!(raw_response.results.len(), 2); + /// + /// // Use the `parse_at` helper to get strongly-typed results for each search. + /// let typed_products: SearchResult = raw_response.parse_at(0)?; + /// let typed_brands: SearchResult = raw_response.parse_at(1)?; + /// + /// println!("Found {} products.", typed_products.found.unwrap_or(0)); + /// println!("Found {} brands.", typed_brands.found.unwrap_or(0)); + /// # Ok(()) + /// # } + /// ``` /// # Arguments - /// * `search_requests` - A `MultiSearchSearchesParameter` contain multiple search requests, this will be sent in the request body. - /// * `common_search_params` - A `MultiSearchParameters` describing search parameters that are common to all searches, these will be sent as URL query parameters. + /// * `search_requests` - A reference to a `MultiSearchSearchesParameter` containing the list of individual search queries. The `union` field is ignored. + /// * `common_search_params` - A reference to a `MultiSearchParameters` struct describing search parameters that are common to all searches. 
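Since `common_search_params` is applied to every search in the request, a typical pattern is to put shared defaults there and let each entry in `searches` set its own values where they differ. A minimal sketch; the field values are placeholders.

```rust
use typesense::models::MultiSearchParameters;

fn main() {
    // Shared defaults for every search in the multi-search request.
    let common = MultiSearchParameters {
        query_by: Some("title,description".to_string()),
        per_page: Some(10),
        ..Default::default()
    };
    println!("per_page default: {:?}", common.per_page);
}
```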
pub async fn perform( &self, - search_requests: models::MultiSearchSearchesParameter, - common_search_params: models::MultiSearchParameters, - ) -> Result> { - let params = common_search_params; - let multi_search_params = MultiSearchParams { - // enable_highlight_v1: None, - // max_candidates: None, - // max_filter_by_candidates: None, - // split_join_tokens: None, - multi_search_searches_parameter: Some(search_requests), - - // Common URL search params - cache_ttl: params.cache_ttl, - conversation: params.conversation, - conversation_id: params.conversation_id, - conversation_model_id: params.conversation_model_id, - drop_tokens_mode: params.drop_tokens_mode, - drop_tokens_threshold: params.drop_tokens_threshold, - enable_overrides: params.enable_overrides, - enable_synonyms: params.enable_synonyms, - enable_typos_for_alpha_numerical_tokens: params.enable_typos_for_alpha_numerical_tokens, - enable_typos_for_numerical_tokens: params.enable_typos_for_numerical_tokens, - exclude_fields: params.exclude_fields, - exhaustive_search: params.exhaustive_search, - facet_by: params.facet_by, - facet_query: params.facet_query, - facet_return_parent: params.facet_return_parent, - facet_strategy: params.facet_strategy, - filter_by: params.filter_by, - filter_curated_hits: params.filter_curated_hits, - group_by: params.group_by, - group_limit: params.group_limit, - group_missing_values: params.group_missing_values, - hidden_hits: params.hidden_hits, - highlight_affix_num_tokens: params.highlight_affix_num_tokens, - highlight_end_tag: params.highlight_end_tag, - highlight_fields: params.highlight_fields, - highlight_full_fields: params.highlight_full_fields, - highlight_start_tag: params.highlight_start_tag, - include_fields: params.include_fields, - infix: params.infix, - limit: params.limit, - max_extra_prefix: params.max_extra_prefix, - max_extra_suffix: params.max_extra_suffix, - max_facet_values: params.max_facet_values, - min_len_1typo: params.min_len_1typo, - min_len_2typo: params.min_len_2typo, - num_typos: params.num_typos, - offset: params.offset, - override_tags: params.override_tags, - page: params.page, - per_page: params.per_page, - pinned_hits: params.pinned_hits, - pre_segmented_query: params.pre_segmented_query, - prefix: params.prefix, - preset: params.preset, - prioritize_exact_match: params.prioritize_exact_match, - prioritize_num_matching_fields: params.prioritize_num_matching_fields, - prioritize_token_position: params.prioritize_token_position, - q: params.q, - query_by: params.query_by, - query_by_weights: params.query_by_weights, - remote_embedding_num_tries: params.remote_embedding_num_tries, - remote_embedding_timeout_ms: params.remote_embedding_timeout_ms, - search_cutoff_ms: params.search_cutoff_ms, - snippet_threshold: params.snippet_threshold, - sort_by: params.sort_by, - stopwords: params.stopwords, - synonym_num_typos: params.synonym_num_typos, - synonym_prefix: params.synonym_prefix, - text_match_type: params.text_match_type, - typo_tokens_threshold: params.typo_tokens_threshold, - use_cache: params.use_cache, - vector_query: params.vector_query, - voice_query: params.voice_query, + search_requests: &MultiSearchSearchesParameter, + common_search_params: &raw_models::MultiSearchParameters, + ) -> Result> { + let request_body = raw_models::MultiSearchSearchesParameter { + searches: search_requests.searches.clone(), + ..Default::default() + }; + let multi_search_params = build_multi_search_params(request_body, common_search_params); + + let raw_result = self + .client + 
.execute(|config: Arc| { + let params_for_move: MultiSearchParams = multi_search_params.clone(); + async move { documents_api::multi_search(&config, params_for_move).await } + }) + .await; + + // Now, handle the raw result and parse it into the strong type. + match raw_result { + Ok(json_value) => { + // The API call was successful and returned a JSON value. + // Now, we try to deserialize this value into our target struct. + serde_json::from_value(json_value) + // If from_value fails, it returns a `serde_json::Error`. + // We need to map this into the expected `Error::Serde` variant + // that the calling function expects. + .map_err(Error::from) + } + Err(e) => { + // The API call itself failed (e.g., network error, server 500). + // In this case, we just propagate the original error. + Err(e) + } + } + } + + /// Performs a multi-search request in **union** mode, returning a single, merged `SearchResult`. + /// + /// This function is ideal for building a federated search experience where results from + /// different collections are displayed together in a single, ranked list. It forces + /// `union: true` in the search request. + /// + /// For more details, see the + /// [official Typesense API documentation on union search](https://typesense.org/docs/latest/api/federated-multi-search.html#union-search). + /// + /// ### Handling Search Results + /// + /// The return type of this function is always `SearchResult` because + /// the search queries can target collections with different document schemas. + /// + /// #### 1. Heterogeneous Documents (Different Schemas) + /// + /// When searching across different collections (e.g., `products` and `brands`), you must + /// inspect the `serde_json::Value` of each document to determine its type before + /// deserializing it into a concrete struct. + /// + /// ```no_run + /// # use typesense::models::SearchResult; + /// # use serde_json::Value; + /// # #[derive(serde::Deserialize)] + /// # struct Product { name: String } + /// # #[derive(serde::Deserialize)] + /// # struct Brand { company_name: String } + /// # async fn run() -> Result<(), Box> { + /// # let search_result: SearchResult = todo!(); + /// for hit in search_result.hits.unwrap_or_default() { + /// if let Some(doc) = hit.document { + /// if doc.get("price").is_some() { + /// let product: Product = serde_json::from_value(doc)?; + /// println!("Found Product: {}", product.name); + /// } else if doc.get("country").is_some() { + /// let brand: Brand = serde_json::from_value(doc)?; + /// println!("Found Brand: {}", brand.company_name); + /// } + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + /// + /// #### 2. Homogeneous Documents (Same Schema) + /// + /// If all search queries target collections that share the **same schema**, you can + /// convert the entire result into a strongly-typed `SearchResult` using the + /// [`SearchResult::try_into_typed`] helper method. This is much more convenient + /// than parsing each hit individually. + /// + /// ```no_run + /// # use typesense::models::SearchResult; + /// # use serde_json::Value; + /// # #[derive(serde::Deserialize)] + /// # struct Product { name: String } + /// # async fn run() -> Result<(), Box> { + /// # let client: typesense::Client = todo!(); + /// let value_result: SearchResult = client.multi_search().perform_union(todo!(), todo!()).await?; + /// + /// // Convert the entire result into a strongly-typed one. 
+ /// let typed_result: SearchResult = value_result.try_into_typed()?; + /// + /// if let Some(product) = typed_result.hits.unwrap_or_default().get(0) { + /// println!("Found product: {}", product.document.as_ref().unwrap().name); + /// } + /// # Ok(()) + /// # } + /// ``` + /// + /// # Arguments + /// + /// * `search_requests` - A reference to a `MultiSearchSearchesParameter` containing the list of search queries to perform. + /// * `common_search_params` - A reference to search parameters that will be applied to all individual searches. + /// + /// # Returns + /// + /// A `Result` containing a `SearchResult` on success, or an `Error` on failure. + pub async fn perform_union( + &self, + search_requests: &MultiSearchSearchesParameter, + common_search_params: &raw_models::MultiSearchParameters, + ) -> Result, Error> { + // Explicitly set `union: true` for the request body, overriding any user value. + let request_body = raw_models::MultiSearchSearchesParameter { + union: Some(true), + searches: search_requests.searches.clone(), }; - self.client + + let multi_search_params = build_multi_search_params(request_body, common_search_params); + + // Execute the request to get the raw JSON value + let raw_result = self + .client .execute(|config: Arc| { let params_for_move = multi_search_params.clone(); async move { documents_api::multi_search(&config, params_for_move).await } }) - .await + .await; + + // Handle the result: parse to raw SearchResult, then convert to generic SearchResult + match raw_result { + Ok(json_value) => { + // A union search returns a single SearchResult object, not a MultiSearchResult. + // First, parse into the non-generic, raw model. + let raw_search_result: raw_models::SearchResult = + serde_json::from_value(json_value).map_err(Error::from)?; + + // Then, use your existing constructor to convert the raw result to the typed one, + // specifying `serde_json::Value` as the document type. + SearchResult::::from_raw(raw_search_result).map_err(Error::from) + } + Err(e) => Err(e), + } + } +} + +// Private helper function to construct the final search parameters object. +// This encapsulates the repetitive mapping logic. 
+fn build_multi_search_params( + request_body: raw_models::MultiSearchSearchesParameter, + params: &raw_models::MultiSearchParameters, +) -> MultiSearchParams { + MultiSearchParams { + multi_search_searches_parameter: Some(request_body), + // Common URL search params + cache_ttl: params.cache_ttl, + conversation: params.conversation, + conversation_id: params.conversation_id.clone(), + conversation_model_id: params.conversation_model_id.clone(), + drop_tokens_mode: params.drop_tokens_mode, + drop_tokens_threshold: params.drop_tokens_threshold, + enable_overrides: params.enable_overrides, + enable_synonyms: params.enable_synonyms, + enable_typos_for_alpha_numerical_tokens: params.enable_typos_for_alpha_numerical_tokens, + enable_typos_for_numerical_tokens: params.enable_typos_for_numerical_tokens, + exclude_fields: params.exclude_fields.clone(), + exhaustive_search: params.exhaustive_search, + facet_by: params.facet_by.clone(), + facet_query: params.facet_query.clone(), + facet_return_parent: params.facet_return_parent.clone(), + facet_strategy: params.facet_strategy.clone(), + filter_by: params.filter_by.clone(), + filter_curated_hits: params.filter_curated_hits, + group_by: params.group_by.clone(), + group_limit: params.group_limit, + group_missing_values: params.group_missing_values, + hidden_hits: params.hidden_hits.clone(), + highlight_affix_num_tokens: params.highlight_affix_num_tokens, + highlight_end_tag: params.highlight_end_tag.clone(), + highlight_fields: params.highlight_fields.clone(), + highlight_full_fields: params.highlight_full_fields.clone(), + highlight_start_tag: params.highlight_start_tag.clone(), + include_fields: params.include_fields.clone(), + infix: params.infix.clone(), + limit: params.limit, + max_extra_prefix: params.max_extra_prefix, + max_extra_suffix: params.max_extra_suffix, + max_facet_values: params.max_facet_values, + min_len_1typo: params.min_len_1typo, + min_len_2typo: params.min_len_2typo, + num_typos: params.num_typos.clone(), + offset: params.offset, + override_tags: params.override_tags.clone(), + page: params.page, + per_page: params.per_page, + pinned_hits: params.pinned_hits.clone(), + pre_segmented_query: params.pre_segmented_query, + prefix: params.prefix.clone(), + preset: params.preset.clone(), + prioritize_exact_match: params.prioritize_exact_match, + prioritize_num_matching_fields: params.prioritize_num_matching_fields, + prioritize_token_position: params.prioritize_token_position, + q: params.q.clone(), + query_by: params.query_by.clone(), + query_by_weights: params.query_by_weights.clone(), + remote_embedding_num_tries: params.remote_embedding_num_tries, + remote_embedding_timeout_ms: params.remote_embedding_timeout_ms, + search_cutoff_ms: params.search_cutoff_ms, + snippet_threshold: params.snippet_threshold, + sort_by: params.sort_by.clone(), + stopwords: params.stopwords.clone(), + synonym_num_typos: params.synonym_num_typos, + synonym_prefix: params.synonym_prefix, + text_match_type: params.text_match_type.clone(), + typo_tokens_threshold: params.typo_tokens_threshold, + use_cache: params.use_cache, + vector_query: params.vector_query.clone(), + voice_query: params.voice_query.clone(), + // enable_highlight_v1: None, + // max_candidates: None, + // max_filter_by_candidates: None, + // split_join_tokens: None, } } diff --git a/typesense/src/error.rs b/typesense/src/error.rs index dd54c2c..2e5610b 100644 --- a/typesense/src/error.rs +++ b/typesense/src/error.rs @@ -52,3 +52,56 @@ where #[error("Failed to deserialize the API response 
into the target struct: {0}")] Deserialization(#[from] serde_json::Error), } + +/// Represents the possible errors that can occur when parsing a `multi_search` response. +/// +/// This error enum is returned by the `MultiSearchResultExt::parse_at` method when it +/// fails to convert a raw search result into a strongly-typed `SearchResult`. +#[derive(Debug, Error)] +pub enum MultiSearchParseError { + /// Indicates that the requested index was outside the bounds of the results vector. + /// + /// For a `multi_search` request with `n` search queries, the valid indices for the + /// results are `0` through `n-1`. This error occurs if the provided index is `n` or greater. + /// + /// # Fields + /// * `0` - The invalid index that was requested. + #[error("Search result index {0} is out of bounds.")] + IndexOutOfBounds(usize), + + /// Indicates that the Typesense server returned an error for the specific search query at this index. + /// + // It's possible for a `multi_search` request to succeed overall, but for one or more + // individual searches within it to fail (e.g., due to a typo in a collection name). + /// + /// # Fields + /// * `index` - The index of the search query that failed. + /// * `message` - The error message returned by the Typesense API for this specific search. + #[error("The search at index {index} failed with an API error: {message}")] + ApiError { + /// The index of the search query that failed. + index: usize, + /// The error message returned by the Typesense API for this specific search. + message: String, + }, + + /// Indicates a failure to deserialize a document's JSON into the target struct `T`. + /// + /// This typically happens when the fields in the document stored in Typesense do not + /// match the fields defined in the target Rust struct `T`. Check for mismatches in + /// field names or data types. + /// + /// # Fields + /// * `index` - The index of the search query where the deserialization error occurred. + /// * `source` - The underlying `serde_json::Error` that provides detailed information + /// about the deserialization failure. + #[error("Failed to deserialize a document at index {index}: {source}")] + Deserialization { + /// The index of the search query where the deserialization error occurred. + index: usize, + /// The underlying `serde_json::Error` that provides detailed information + /// about the deserialization failure. + #[source] + source: serde_json::Error, + }, +} diff --git a/typesense/src/lib.rs b/typesense/src/lib.rs index 57cc873..2ac59cf 100644 --- a/typesense/src/lib.rs +++ b/typesense/src/lib.rs @@ -49,11 +49,14 @@ mod error; pub mod collection_schema; pub mod document; pub mod field; +pub mod prelude; // pub mod keys; pub mod models; pub use client::{Client, MultiNodeConfiguration}; -pub use error::{ApiError, Error}; +pub use error::*; +pub use models::*; +pub use prelude::*; #[cfg(feature = "typesense_derive")] #[doc(hidden)] diff --git a/typesense/src/models/mod.rs b/typesense/src/models/mod.rs index 9c13056..d4c7f26 100644 --- a/typesense/src/models/mod.rs +++ b/typesense/src/models/mod.rs @@ -1,7 +1,8 @@ //! 
# Typesense generic models +mod multi_search; mod scoped_key_parameters; mod search_result; - +pub use multi_search::*; pub use scoped_key_parameters::*; pub use search_result::*; @@ -19,18 +20,17 @@ pub use typesense_codegen::models::{ FacetCountsStats, Field, FieldEmbed, FieldEmbedModelConfig, HealthStatus, ImportDocumentsParameters, IndexAction, ListStemmingDictionaries200Response, MultiSearchCollectionParameters, MultiSearchParameters, MultiSearchResult, - MultiSearchResultItem, MultiSearchSearchesParameter, NlSearchModelBase, - NlSearchModelCreateSchema, NlSearchModelDeleteSchema, NlSearchModelSchema, PresetDeleteSchema, - PresetSchema, PresetUpsertSchema, PresetUpsertSchemaValue, PresetsRetrieveSchema, - SchemaChangeStatus, SearchGroupedHit, SearchHighlight, SearchOverride, - SearchOverrideDeleteResponse, SearchOverrideExclude, SearchOverrideInclude, SearchOverrideRule, - SearchOverrideSchema, SearchOverridesResponse, SearchParameters, SearchResultConversation, - SearchResultHitTextMatchInfo, SearchResultRequestParams, SearchResultRequestParamsVoiceQuery, - SearchSynonym, SearchSynonymDeleteResponse, SearchSynonymSchema, SearchSynonymsResponse, - SnapshotParameters, StemmingDictionary, StemmingDictionaryWordsInner, - StopwordsSetRetrieveSchema, StopwordsSetSchema, StopwordsSetUpsertSchema, - StopwordsSetsRetrieveAllSchema, SuccessStatus, UpdateDocuments200Response, - UpdateDocumentsParameters, VoiceQueryModelCollectionConfig, + MultiSearchResultItem, NlSearchModelBase, NlSearchModelCreateSchema, NlSearchModelDeleteSchema, + NlSearchModelSchema, PresetDeleteSchema, PresetSchema, PresetUpsertSchema, + PresetUpsertSchemaValue, PresetsRetrieveSchema, SchemaChangeStatus, SearchGroupedHit, + SearchHighlight, SearchOverride, SearchOverrideDeleteResponse, SearchOverrideExclude, + SearchOverrideInclude, SearchOverrideRule, SearchOverrideSchema, SearchOverridesResponse, + SearchParameters, SearchResultConversation, SearchResultHitTextMatchInfo, + SearchResultRequestParams, SearchResultRequestParamsVoiceQuery, SearchSynonym, + SearchSynonymDeleteResponse, SearchSynonymSchema, SearchSynonymsResponse, SnapshotParameters, + StemmingDictionary, StemmingDictionaryWordsInner, StopwordsSetRetrieveSchema, + StopwordsSetSchema, StopwordsSetUpsertSchema, StopwordsSetsRetrieveAllSchema, SuccessStatus, + UpdateDocuments200Response, UpdateDocumentsParameters, VoiceQueryModelCollectionConfig, }; // Only re-export the sub modules that have enums inside them. pub use typesense_codegen::models::{ diff --git a/typesense/src/models/multi_search.rs b/typesense/src/models/multi_search.rs new file mode 100644 index 0000000..3e67ccd --- /dev/null +++ b/typesense/src/models/multi_search.rs @@ -0,0 +1,23 @@ +use crate::models; +use serde::{Deserialize, Serialize}; + +/// Represents the body of a multi-search request. +/// +/// This struct acts as a container for a list of individual search queries that will be +/// sent to the Typesense multi-search endpoint. Each search query is defined in a +/// `MultiSearchCollectionParameters` struct within the `searches` vector. +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct MultiSearchSearchesParameter { + /// A vector of individual search queries to be executed. The order of the search results returned by Typesense will match the order of these queries. + #[serde(rename = "searches")] + pub searches: Vec, +} + +impl MultiSearchSearchesParameter { + /// Creates a new `MultiSearchSearchesParameter` instance. 
+    pub fn new(
+        searches: Vec<models::MultiSearchCollectionParameters>,
+    ) -> MultiSearchSearchesParameter {
+        MultiSearchSearchesParameter { searches }
+    }
+}
diff --git a/typesense/src/models/search_result.rs b/typesense/src/models/search_result.rs
index e71d16b..62c69be 100644
--- a/typesense/src/models/search_result.rs
+++ b/typesense/src/models/search_result.rs
@@ -1,6 +1,7 @@
 //! Contains the generic `SearchResult` and `SearchResultHit` structs
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
+use serde_json::Value;
 use typesense_codegen::models as raw_models;
 /// Represents a single search result hit, with the document deserialized into a strongly-typed struct `T`.
@@ -105,24 +106,28 @@ where
     ) -> Result<Self, serde_json::Error> {
         let typed_hits = match raw_result.hits {
             Some(raw_hits) => {
-                let mut hits = Vec::with_capacity(raw_hits.len());
-                for raw_hit in raw_hits {
-                    let document: Option<T> = match raw_hit.document {
-                        Some(doc_value) => Some(serde_json::from_value(doc_value)?),
-                        None => None,
-                    };
-
-                    hits.push(SearchResultHit {
-                        document,
-                        highlights: raw_hit.highlights,
-                        highlight: raw_hit.highlight,
-                        text_match: raw_hit.text_match,
-                        text_match_info: raw_hit.text_match_info,
-                        geo_distance_meters: raw_hit.geo_distance_meters,
-                        vector_distance: raw_hit.vector_distance,
-                    });
-                }
-                Some(hits)
+                let hits_result: Result<Vec<SearchResultHit<T>>, _> = raw_hits
+                    .into_iter()
+                    .map(|raw_hit| {
+                        // Map each raw hit to a Result<SearchResultHit<T>, _>
+                        let document: Result<Option<T>, _> = raw_hit
+                            .document
+                            .map(|doc_value| serde_json::from_value(doc_value))
+                            .transpose();
+
+                        Ok(SearchResultHit {
+                            document: document?,
+                            highlights: raw_hit.highlights,
+                            highlight: raw_hit.highlight,
+                            text_match: raw_hit.text_match,
+                            text_match_info: raw_hit.text_match_info,
+                            geo_distance_meters: raw_hit.geo_distance_meters,
+                            vector_distance: raw_hit.vector_distance,
+                        })
+                    })
+                    .collect();
+
+                Some(hits_result?)
             }
             None => None,
         };
@@ -142,3 +147,71 @@ where
         })
     }
 }
+
+// This impl block specifically targets `SearchResult<Value>`.
+// The methods inside will only be available on a search result of that exact type.
+impl SearchResult<Value> {
+    /// Attempts to convert a `SearchResult<Value>` into a `SearchResult<T>`.
+    ///
+    /// This method is useful after a `perform_union` call where you know all resulting
+    /// documents share the same schema and can be deserialized into a single concrete type `T`.
+    ///
+    /// It iterates through each hit and tries to deserialize its `document` field. If any
+    /// document fails to deserialize into type `T`, the entire conversion fails.
+    ///
+    /// # Type Parameters
+    ///
+    /// * `T` - The concrete, `DeserializeOwned` type you want to convert the documents into.
+    ///
+    /// # Errors
+    ///
+    /// Returns a `serde_json::Error` if any document in the hit list cannot be successfully
+    /// deserialized into `T`.
+    pub fn try_into_typed<T: DeserializeOwned>(self) -> Result<SearchResult<T>, serde_json::Error> {
+        // This logic is very similar to `from_raw`, but it converts between generic types
+        // instead of from a raw model.
+        let typed_hits = match self.hits {
+            Some(value_hits) => {
+                let hits_result: Result<Vec<SearchResultHit<T>>, _> = value_hits
+                    .into_iter()
+                    .map(|value_hit| {
+                        // `value_hit` here is `SearchResultHit<Value>`
+                        let document: Option<T> = match value_hit.document {
+                            Some(doc_value) => Some(serde_json::from_value(doc_value)?),
+                            None => None,
+                        };
+
+                        // Construct the new, strongly-typed hit.
+                        Ok(SearchResultHit {
+                            document,
+                            highlights: value_hit.highlights,
+                            highlight: value_hit.highlight,
+                            text_match: value_hit.text_match,
+                            text_match_info: value_hit.text_match_info,
+                            geo_distance_meters: value_hit.geo_distance_meters,
+                            vector_distance: value_hit.vector_distance,
+                        })
+                    })
+                    .collect();
+
+                Some(hits_result?)
+            }
+            None => None,
+        };
+
+        // Construct the final, strongly-typed search result, carrying over all metadata.
+        Ok(SearchResult {
+            hits: typed_hits,
+            found: self.found,
+            found_docs: self.found_docs,
+            out_of: self.out_of,
+            page: self.page,
+            search_time_ms: self.search_time_ms,
+            facet_counts: self.facet_counts,
+            grouped_hits: self.grouped_hits,
+            search_cutoff: self.search_cutoff,
+            request_params: self.request_params,
+            conversation: self.conversation,
+        })
+    }
+}
diff --git a/typesense/src/prelude.rs b/typesense/src/prelude.rs
new file mode 100644
index 0000000..9c591cd
--- /dev/null
+++ b/typesense/src/prelude.rs
@@ -0,0 +1,23 @@
+//! The Typesense prelude.
+//!
+//! This module re-exports the most commonly used traits and types from the library,
+//! making them easy to import with a single `use` statement.
+
+use serde::de::DeserializeOwned;
+
+use crate::{MultiSearchParseError, SearchResult};
+
+/// An extension trait for `typesense_codegen::models::MultiSearchResult` to provide typed parsing.
+pub trait MultiSearchResultExt {
+    /// Parses the result at a specific index from a multi-search response into a strongly-typed `SearchResult<T>`.
+    ///
+    /// # Arguments
+    /// * `index` - The zero-based index of the search result to parse.
+    ///
+    /// # Type Parameters
+    /// * `T` - The concrete document type to deserialize the hits into.
+    fn parse_at<T: DeserializeOwned>(
+        &self,
+        index: usize,
+    ) -> Result<SearchResult<T>, MultiSearchParseError>;
+}
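Taken together, the trait above and the `MultiSearchParseError` enum are meant to be used roughly as in the following sketch. The `Product` struct mirrors the one defined in the tests further down, and the `typesense::models::MultiSearchResult` path follows the re-exports in `models/mod.rs`; treat this as a usage sketch rather than part of the patch itself.

```rust
use serde::Deserialize;
use typesense::models::MultiSearchResult;
use typesense::prelude::*;
use typesense::{MultiSearchParseError, SearchResult};

#[derive(Debug, Deserialize)]
struct Product {
    id: String,
    name: String,
    price: i32,
}

// `raw_response` is the untyped result of a federated multi-search call.
fn print_first_result(raw_response: &MultiSearchResult) {
    // Ask for the result set at index 0, typed as SearchResult<Product>.
    let parsed: Result<SearchResult<Product>, MultiSearchParseError> = raw_response.parse_at(0);
    match parsed {
        Ok(products) => println!("found: {:?}", products.found),
        Err(MultiSearchParseError::IndexOutOfBounds(i)) => {
            eprintln!("no search result at index {}", i)
        }
        Err(MultiSearchParseError::ApiError { index, message }) => {
            eprintln!("search {} failed on the server: {}", index, message)
        }
        Err(MultiSearchParseError::Deserialization { index, source }) => {
            eprintln!("documents at index {} did not match Product: {}", index, source)
        }
    }
}
```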
diff --git a/typesense/tests/client/multi_search_test.rs b/typesense/tests/client/multi_search_test.rs
index c3477e6..1d2da59 100644
--- a/typesense/tests/client/multi_search_test.rs
+++ b/typesense/tests/client/multi_search_test.rs
@@ -1,6 +1,10 @@
-use typesense::models::{
-    CollectionSchema, Field, ImportDocumentsParameters, MultiSearchCollectionParameters,
-    MultiSearchParameters, MultiSearchSearchesParameter,
+use serde::Deserialize;
+use typesense::{
+    models::{
+        CollectionSchema, Field, ImportDocumentsParameters, MultiSearchCollectionParameters,
+        MultiSearchParameters, MultiSearchSearchesParameter, SearchResult,
+    },
+    prelude::*,
 };
 use super::{get_client, new_id};
@@ -87,7 +91,6 @@ async fn test_multi_search_federated() {
     setup_multi_search_tests(&client, &products_collection_name, &brands_collection_name).await;
     let search_requests = MultiSearchSearchesParameter {
-        union: Some(false),
         searches: vec![
             MultiSearchCollectionParameters {
                 q: Some("pro".into()),
@@ -108,7 +111,7 @@ async fn test_multi_search_federated() {
     let result = client
         .multi_search()
-        .perform(search_requests, common_params)
+        .perform(&search_requests, &common_params)
         .await;
     assert!(result.is_ok(), "Multi-search request failed");
@@ -147,18 +150,6 @@ async fn test_multi_search_federated() {
         brand_doc.get("company_name").unwrap().as_str(),
         Some("Apple Inc.")
     );
-
-    // --- Cleanup ---
-    client
-        .collection(&products_collection_name)
-        .delete()
-        .await
-        .unwrap();
-    client
-        .collection(&brands_collection_name)
-        .delete()
-        .await
-        .unwrap();
 }
 #[tokio::test]
@@ -170,7 +161,6 @@ async fn test_multi_search_with_common_params() {
     // Define individual searches, each with the correct `query_by` for its schema.
     let search_requests = MultiSearchSearchesParameter {
-        union: Some(false),
         searches: vec![
             MultiSearchCollectionParameters {
                 collection: Some(products_collection_name.clone()),
@@ -194,7 +184,7 @@
     let result = client
         .multi_search()
-        .perform(search_requests, common_params)
+        .perform(&search_requests, &common_params)
         .await;
     assert!(
@@ -233,16 +223,254 @@
         brand_hit.document.as_ref().unwrap()["company_name"],
         "Apple Inc."
     );
+}
-
-    // --- Cleanup ---
-    client
-        .collection(&products_collection_name)
-        .delete()
+#[derive(Debug, Deserialize, PartialEq)]
+struct Product {
+    id: String,
+    name: String,
+    price: i32,
+}
+
+#[derive(Debug, Deserialize, PartialEq)]
+struct Brand {
+    id: String,
+    company_name: String,
+    country: String,
+}
+
+#[tokio::test]
+async fn test_multi_search_generic_parsing() {
+    let client = get_client();
+    let products_collection_name = new_id("products_generic");
+    let brands_collection_name = new_id("brands_generic");
+    setup_multi_search_tests(&client, &products_collection_name, &brands_collection_name).await;
+
+    let search_requests = MultiSearchSearchesParameter {
+        searches: vec![
+            // Search #0 for products
+            MultiSearchCollectionParameters {
+                q: Some("pro".into()),
+                query_by: Some("name".into()),
+                collection: Some(products_collection_name.clone()),
+                ..Default::default()
+            },
+            // Search #1 for brands
+            MultiSearchCollectionParameters {
+                q: Some("USA".into()),
+                query_by: Some("country".into()),
+                collection: Some(brands_collection_name.clone()),
+                ..Default::default()
+            },
+        ],
+        ..Default::default()
+    };
+
+    let common_params = MultiSearchParameters::default();
+
+    // Perform the search and get the raw, untyped response
+    let raw_response = client
+        .multi_search()
+        .perform(&search_requests, &common_params)
         .await
         .unwrap();
-    client
-        .collection(&brands_collection_name)
-        .delete()
+
+    // --- Use the new generic parsing feature ---
+
+    // Parse the first result set (index 0) into SearchResult<Product>
+    let products_result: SearchResult<Product> =
+        raw_response.parse_at(0).expect("Parsing products failed");
+
+    // Parse the second result set (index 1) into SearchResult<Brand>
+    let brands_result: SearchResult<Brand> =
+        raw_response.parse_at(1).expect("Parsing brands failed");
+
+    // --- Assert the strongly-typed results ---
+
+    // Assert products result
+    assert_eq!(products_result.found, Some(1), "Expected to find 1 product");
+    let product_hit = &products_result
+        .hits
+        .as_ref()
+        .unwrap()
+        .get(0)
+        .expect("No product hits found");
+    let product_doc = product_hit
+        .document
+        .as_ref()
+        .expect("Product hit has no document");
+
+    assert_eq!(product_doc.name, "MacBook Pro");
+    assert_eq!(product_doc.price, 1999);
+    assert_eq!(
+        *product_doc,
+        Product {
+            id: "p2".to_string(),
+            name: "MacBook Pro".to_string(),
+            price: 1999,
+        }
+    );
+
+    // Assert brands result
+    assert_eq!(brands_result.found, Some(1), "Expected to find 1 brand");
+    let brand_hit = &brands_result
+        .hits
+        .as_ref()
+        .unwrap()
+        .get(0)
+        .expect("No brand hits found");
+    let brand_doc = brand_hit
+        .document
+        .as_ref()
+        .expect("Brand hit has no document");
+
+    assert_eq!(brand_doc.company_name, "Apple Inc.");
+    assert_eq!(
+        *brand_doc,
+        Brand {
+            id: "b1".to_string(),
+            company_name: "Apple Inc.".to_string(),
+            country: "USA".to_string(),
+        }
+    );
+}
+
+#[tokio::test]
+async fn test_multi_search_union_heterogeneous() {
+    let client = get_client();
+    let products_collection_name =
new_id("products_union"); + let brands_collection_name = new_id("brands_union"); + setup_multi_search_tests(&client, &products_collection_name, &brands_collection_name).await; + + // We will search for "pro" in products and "samsung" in brands. + // This should yield one hit from each collection. + let search_requests = MultiSearchSearchesParameter { + searches: vec![ + MultiSearchCollectionParameters { + q: Some("pro".into()), + query_by: Some("name".into()), + collection: Some(products_collection_name.clone()), + ..Default::default() + }, + MultiSearchCollectionParameters { + q: Some("samsung".into()), + query_by: Some("company_name".into()), + collection: Some(brands_collection_name.clone()), + ..Default::default() + }, + ], + }; + + let common_params = MultiSearchParameters::default(); + + // Call the new union function + let result = client + .multi_search() + .perform_union(&search_requests, &common_params) + .await; + + assert!( + result.is_ok(), + "Union multi-search request failed: {:?}", + result.err() + ); + let response = result.unwrap(); + + // In a union search, we expect a single merged result set. + // We found "MacBook Pro" and "Samsung". + assert_eq!(response.found, Some(2)); + let hits = response.hits.expect("Expected to find hits"); + assert_eq!(hits.len(), 2); + + // --- Process the heterogeneous hits --- + // This demonstrates how a user would handle the `serde_json::Value` documents. + let mut product_count = 0; + let mut brand_count = 0; + + for hit in hits { + let document = hit.document.as_ref().unwrap(); + + // Check for a field unique to the Product schema to identify the document type. + if document.get("price").is_some() { + let product: Product = + serde_json::from_value(document.clone()).expect("Failed to parse Product"); + assert_eq!(product.name, "MacBook Pro"); + product_count += 1; + } + // Check for a field unique to the Brand schema. + else if document.get("company_name").is_some() { + let brand: Brand = + serde_json::from_value(document.clone()).expect("Failed to parse Brand"); + assert_eq!(brand.company_name, "Samsung"); + brand_count += 1; + } + } + + // Verify that we correctly identified one of each type from the merged results. + assert_eq!( + product_count, 1, + "Expected to find 1 product in the union result" + ); + assert_eq!( + brand_count, 1, + "Expected to find 1 brand in the union result" + ); +} + +#[tokio::test] +async fn test_multi_search_union_homogeneous_and_typed_conversion() { + let client = get_client(); + let products_collection_name = new_id("products_union_homo"); + // We only need one collection for this test, but the setup creates two. + let brands_collection_name = new_id("brands_union_homo_unused"); + setup_multi_search_tests(&client, &products_collection_name, &brands_collection_name).await; + + // Both search queries target the *same* products collection. + let search_requests = MultiSearchSearchesParameter { + searches: vec![ + // This query should find "iPhone 15" + MultiSearchCollectionParameters { + q: Some("iphone".into()), + query_by: Some("name".into()), + collection: Some(products_collection_name.clone()), + ..Default::default() + }, + // This query should find "MacBook Pro" + MultiSearchCollectionParameters { + q: Some("macbook".into()), + query_by: Some("name".into()), + collection: Some(products_collection_name.clone()), + ..Default::default() + }, + ], + }; + + // 1. 
Call perform_union, which returns a SearchResult<serde_json::Value>
+    let value_result = client
+        .multi_search()
+        .perform_union(&search_requests, &MultiSearchParameters::default())
         .await
-        .unwrap();
+        .expect("Union search failed");
+
+    // 2. Use the helper to convert it to a SearchResult<Product>
+    let typed_result: SearchResult<Product> = value_result
+        .try_into_typed()
+        .expect("Conversion to typed result failed");
+
+    // 3. Assert the strongly-typed result
+    assert_eq!(typed_result.found, Some(2));
+    let mut hits = typed_result.hits.expect("Expected hits");
+
+    // Sort by price to have a predictable order for assertions.
+    hits.sort_by_key(|h| h.document.as_ref().unwrap().price);
+
+    // Assert the first hit (iPhone)
+    let iphone = &hits[0].document.as_ref().unwrap();
+    assert_eq!(iphone.name, "iPhone 15");
+    assert_eq!(iphone.price, 999);
+
+    // Assert the second hit (MacBook Pro)
+    let macbook = &hits[1].document.as_ref().unwrap();
+    assert_eq!(macbook.name, "MacBook Pro");
+    assert_eq!(macbook.price, 1999);
 }
diff --git a/typesense_codegen/src/apis/documents_api.rs b/typesense_codegen/src/apis/documents_api.rs
index e5335ff..906b4c7 100644
--- a/typesense_codegen/src/apis/documents_api.rs
+++ b/typesense_codegen/src/apis/documents_api.rs
@@ -947,7 +947,8 @@ pub async fn index_document(
 pub async fn multi_search(
     configuration: &configuration::Configuration,
     params: MultiSearchParams,
-) -> Result<models::MultiSearchResult, Error<MultiSearchError>> {
+    // was modified by hand
+) -> Result<serde_json::Value, Error<MultiSearchError>> {
     let uri_str = format!("{}/multi_search", configuration.base_path);
     let mut req_builder = configuration
         .client

From 2cb188f9f98cef94eac72fb2aa771efafac147c2 Mon Sep 17 00:00:00 2001
From: Hayden Hung Hoang
Date: Sun, 10 Aug 2025 20:36:00 +0700
Subject: [PATCH 10/21] feat(derive): Implement advanced nested and flattened field handling

This commit introduces a comprehensive and robust implementation for handling complex document structures within the `#[derive(Typesense)]` macro, enabling powerful schema generation directly from Rust structs. The macro now supports the full range of advanced indexing strategies offered by Typesense, including automatic object indexing, field flattening with prefix control, and patterns for manual flattening.

### Key Features & Implementation Details

- **Automatic Object Indexing:**
  - A field containing a nested struct that also derives `Document` is now automatically mapped to a Typesense `object` (or `object[]` for `Vec`).
  - This feature requires `#[typesense(enable_nested_fields = true)]` on the parent collection, which the macro now supports.
- **Automatic Field Flattening with `#[typesense(flatten)]`:**
  - A field marked `#[typesense(flatten)]` has its sub-fields expanded into the parent schema using dot-notation.
  - By default, the Rust field's name is used as the prefix for all sub-fields (e.g., `details: ProductDetails` results in schema fields like `details.part_number`).
- **Prefix Override for Flattening:**
  - The `flatten` attribute can be combined with `rename` to provide a custom prefix for the flattened fields.
  - Usage: `#[typesense(flatten, rename = "custom_prefix")]`
  - This provides powerful schema mapping flexibility, allowing the Rust struct's field name to differ from the prefix used in the Typesense schema.
- **Manual Flattening Pattern (`skip` + `rename`):**
  - A new `#[typesense(skip)]` attribute has been introduced to completely exclude a field from the generated Typesense schema.
  - This enables the pattern of sending both nested and flattened data to Typesense: the nested version can be used for display/deserialization, while a separate set of flattened fields is used for indexing. This is achieved by:
    1. Marking the nested struct field (e.g., `details: Details`) with `#[typesense(skip)]`.
    2. Adding corresponding top-level fields to the Rust struct, marked with `#[typesense(rename = "details.field_name")]`.
- **Ergonomic Boolean Attributes:**
  - All boolean attributes (`facet`, `sort`, `index`, `store`, `infix`, `stem`, `optional`, `range_index`) now support shorthand "flag" syntax.
  - For example, `#[typesense(sort)]` is a valid and recommended equivalent to `#[typesense(sort = true)]`, dramatically improving readability and consistency.
- **Robust Error Handling & Validation:**
  - The macro provides clear, compile-time errors for invalid or ambiguous attribute usage.
  - It correctly detects and reports duplicate attributes, whether they are in the same `#[typesense(...)]` block or across multiple attributes on the same field.
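For illustration, a condensed struct combining these attributes could look like the following sketch. It is modelled on the structs exercised by the integration tests added in this commit; the collection name, field names, and the `Details`/`Manufacturer` types are only examples.

```rust
use serde::{Deserialize, Serialize};
use typesense::Typesense;

#[derive(Typesense, Serialize, Deserialize)]
struct Details {
    #[typesense(facet)]
    part_number: String,
    weight_kg: f32,
}

#[derive(Typesense, Serialize, Deserialize)]
struct Manufacturer {
    name: String,
    city: String,
}

#[derive(Typesense, Serialize, Deserialize)]
#[typesense(collection_name = "products", enable_nested_fields = true)]
struct Product {
    id: String,

    // Shorthand flag syntax, equivalent to #[typesense(sort = true)].
    #[typesense(sort)]
    price: f32,

    // Indexed automatically as a single Typesense `object`.
    manufacturer: Manufacturer,

    // Flattened into `details.part_number`, `details.weight_kg`.
    #[typesense(flatten)]
    details: Details,

    // Flattened with a custom prefix: `specs.part_number`, `specs.weight_kg`.
    #[typesense(flatten, rename = "specs")]
    #[serde(rename = "specs")]
    extra_specs: Details,

    // Present in the JSON document but excluded from the generated schema.
    #[typesense(skip)]
    internal_notes: String,
}
```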
### Testing

- **Comprehensive Integration Test (`derive_integration.rs`):**
  - A new, full-lifecycle integration test has been added to validate the entire feature set.
  - The test defines a complex struct using every new attribute and pattern, generates a schema, creates a real collection, and uses the generic client (`collection_of`) to perform and validate a full Create, Read, Update, Delete, and Search lifecycle.
  - A second integration test was added to specifically validate the manual flattening pattern.
- **UI Tests:**
  - `trybuild` UI tests have been added to verify that the macro produces the correct compile-time errors for invalid attribute combinations, such as duplicate attributes.
---
 typesense/src/field/field_type.rs             |  15 +-
 typesense/src/field/mod.rs                    |  36 ++
 .../tests/client/derive_integration_test.rs   | 537 ++++++++++++++++++
 typesense/tests/client/mod.rs                 |  33 +-
 typesense/tests/derive/collection.rs          |  53 --
 .../derive/derive_collection_schema_test.rs   | 303 ++++++++++
 typesense/tests/derive/lib.rs                 |   2 +-
 .../derive/ui/duplicated_attribute.stderr     |  16 +-
 .../tests/derive/ui/unknown_attribute.stderr  |   4 +-
 typesense_derive/src/field_attrs.rs           | 413 ++++++++++++++
 typesense_derive/src/lib.rs                   | 173 +++---
 11 files changed, 1413 insertions(+), 172 deletions(-)
 create mode 100644 typesense/tests/client/derive_integration_test.rs
 delete mode 100644 typesense/tests/derive/collection.rs
 create mode 100644 typesense/tests/derive/derive_collection_schema_test.rs
 create mode 100644 typesense_derive/src/field_attrs.rs

diff --git a/typesense/src/field/field_type.rs b/typesense/src/field/field_type.rs
index 3150ad9..da98b48 100644
--- a/typesense/src/field/field_type.rs
+++ b/typesense/src/field/field_type.rs
@@ -1,5 +1,5 @@
+use crate::document::Document;
 use std::collections::{BTreeMap, HashMap};
-
 /// Type for a field. Currently it is a wrapping to a `String` but it could be extended to a enum
 pub type FieldType = String;
@@ -10,6 +10,19 @@ pub trait ToTypesenseField {
     fn to_typesense_type() -> &'static str;
 }
 
+/// Generic implementation for any type that is also a Typesense document.
+impl<T: Document> ToTypesenseField for T {
+    fn to_typesense_type() -> &'static str {
+        "object"
+    }
+}
+
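A small sketch of what these blanket impls imply for a nested document type. The `typesense::field::ToTypesenseField` import path is an assumption here, and `Manufacturer` mirrors a struct from the integration test added below; treat this as an illustration rather than part of the patch.

```rust
use serde::{Deserialize, Serialize};
use typesense::field::ToTypesenseField;
use typesense::Typesense;

#[derive(Typesense, Serialize, Deserialize)]
struct Manufacturer {
    name: String,
    city: String,
}

fn main() {
    // A derived document type reports itself as `object`, and a Vec of it as `object[]`,
    // which is what the derive macro writes into the parent collection schema.
    assert_eq!(Manufacturer::to_typesense_type(), "object");
    assert_eq!(<Vec<Manufacturer>>::to_typesense_type(), "object[]");
}
```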
+/// Generic implementation for a Vec of any type that is also a Typesense document.
+impl<T: Document> ToTypesenseField for Vec<T> {
+    fn to_typesense_type() -> &'static str {
+        "object[]"
+    }
+}
 /// macro used internally to add implementations of ToTypesenseField for several rust types.
 #[macro_export]
 macro_rules! impl_to_typesense_field (
diff --git a/typesense/src/field/mod.rs b/typesense/src/field/mod.rs
index 4cd98b6..5629728 100644
--- a/typesense/src/field/mod.rs
+++ b/typesense/src/field/mod.rs
@@ -20,6 +20,10 @@ pub struct FieldBuilder {
     num_dim: Option<i32>,
     drop: Option<bool>,
     embed: Option<Box<FieldEmbed>>,
+    store: Option<bool>,
+    stem: Option<bool>,
+    range_index: Option<bool>,
+    vec_dist: Option<String>,
 }
 impl FieldBuilder {
@@ -89,6 +93,34 @@ impl FieldBuilder {
         self
     }
+    /// Set store attribute for field
+    #[inline]
+    pub fn store(mut self, store: Option<bool>) -> Self {
+        self.store = store;
+        self
+    }
+
+    /// Set stem attribute for field
+    #[inline]
+    pub fn stem(mut self, stem: Option<bool>) -> Self {
+        self.stem = stem;
+        self
+    }
+
+    /// Set range_index attribute for field
+    #[inline]
+    pub fn range_index(mut self, range_index: Option<bool>) -> Self {
+        self.range_index = range_index;
+        self
+    }
+
+    /// Set vec_dist attribute for field
+    #[inline]
+    pub fn vec_dist(mut self, vec_dist: Option<String>) -> Self {
+        self.vec_dist = vec_dist;
+        self
+    }
+
     /// Create a `Field` with the current values of the builder,
     /// It can fail if the name or the typesense_type are not defined.
     #[inline]
@@ -105,6 +137,10 @@ impl FieldBuilder {
             num_dim: self.num_dim,
             drop: self.drop,
             embed: self.embed,
+            store: self.store,
+            stem: self.stem,
+            range_index: self.range_index,
+            vec_dist: self.vec_dist,
             ..Default::default()
         }
     }
diff --git a/typesense/tests/client/derive_integration_test.rs b/typesense/tests/client/derive_integration_test.rs
new file mode 100644
index 0000000..98516bc
--- /dev/null
+++ b/typesense/tests/client/derive_integration_test.rs
@@ -0,0 +1,537 @@
+use serde::{Deserialize, Serialize};
+use typesense::models::SearchParameters;
+use typesense::Typesense;
+use typesense::{document::Document, Field};
+
+use crate::{get_client, new_id};
+
+/// A nested struct that will be flattened into the parent.
+#[derive(Typesense, Serialize, Deserialize, Debug, PartialEq, Clone)]
+struct ProductDetails {
+    #[typesense(facet)]
+    part_number: String,
+    #[typesense(sort = false)]
+    weight_kg: f32,
+    #[typesense(skip)]
+    desc: String,
+}
+
+/// A nested struct that will be flattened and renamed.
+#[derive(Typesense, Serialize, Deserialize, Debug, PartialEq, Clone)]
+struct Logistics {
+    warehouse_code: String,
+    shipping_class: String,
+}
+
+/// A nested struct that will be indexed as a single "object".
+#[derive(Typesense, Serialize, Deserialize, Debug, PartialEq, Clone)]
+struct Manufacturer {
+    name: String,
+    city: String,
+}
+
+/// The main "uber" struct that uses every feature of the derive macro.
+#[derive(Typesense, Serialize, Deserialize, Debug, PartialEq, Clone)]
+#[typesense(
+    collection_name = "mega_products",
+    default_sorting_field = "price",
+    enable_nested_fields = true,
+    token_separators = ["-", "/"],
+    symbols_to_index = ["+"]
+)]
+struct MegaProduct {
+    id: String,
+
+    #[typesense(infix, stem)]
+    title: String,
+
+    #[typesense(rename = "product_name")]
+    #[serde(rename = "product_name")]
+    official_name: String,
+
+    #[typesense(facet)]
+    brand: String,
+
+    #[typesense(sort)]
+    price: f32,
+
+    #[typesense(range_index)]
+    review_score: f32,
+
+    #[typesense(index = false, store = false)]
+    internal_sku: Option<String>,
+
+    #[typesense(type = "geopoint")]
+    location: (f32, f32),
+
+    #[typesense(num_dim = 4, vec_dist = "cosine")]
+    embedding: Vec<f32>,
+
+    #[typesense(flatten)]
+    details: ProductDetails,
+
+    #[typesense(flatten, rename = "logistics_data")]
+    #[serde(rename = "logistics_data")]
+    logistics: Logistics,
+
+    manufacturer: Manufacturer,
+
+    tags: Option<Vec<String>>,
+}
+
+#[tokio::test]
+async fn test_derive_macro_with_generic_client_lifecycle() {
+    let client = get_client();
+    let collection_name = new_id("mega_products_test");
+
+    // Create Collection using the schema from the derive macro
+    let schema = MegaProduct::collection_schema();
+    let mut schema_for_creation = schema.clone();
+    schema_for_creation.name = collection_name.clone(); // Use the unique name
+
+    let create_res = client.collections().create(schema_for_creation).await;
+    assert!(
+        create_res.is_ok(),
+        "Failed to create collection: {:?}",
+        create_res.err()
+    );
+
+    // Verify the schema on the server with targeted assertions
+    let retrieved_schema = client
+        .collection(&collection_name)
+        .retrieve()
+        .await
+        .unwrap();
+
+    // Create a map of the actual fields for easy lookup.
+    let actual_fields_map: std::collections::HashMap<String, Field> = retrieved_schema
+        .fields
+        .into_iter()
+        .map(|f| (f.name.clone(), f))
+        .collect();
+
+    // Iterate through our *expected* fields and assert only the attributes we set.
+    for expected_field in schema.fields {
+        let field_name = &expected_field.name;
+        // The 'id' field is a special primary key and not listed in the schema's "fields" array.
+ if field_name == "id" { + continue; + } + let actual_field = actual_fields_map.get(field_name).unwrap_or_else(|| { + panic!( + "Field '{}' expected but not found in retrieved schema", + field_name + ) + }); + + // Perform targeted checks based on the attributes set in MegaProduct struct + match field_name.as_str() { + "title" => { + assert_eq!( + actual_field.infix, + Some(true), + "Field 'title' should have infix: true" + ); + assert_eq!( + actual_field.stem, + Some(true), + "Field 'title' should have stem: true" + ); + } + "product_name" => { + // This is the renamed `official_name` + assert_eq!( + actual_field.name, "product_name", + "Field 'official_name' should be renamed to 'product_name'" + ); + } + "brand" => { + assert_eq!( + actual_field.facet, + Some(true), + "Field 'brand' should have facet: true" + ); + } + "price" => { + assert_eq!( + actual_field.sort, + Some(true), + "Field 'price' should have sort: true" + ); + } + "review_score" => { + assert_eq!( + actual_field.range_index, + Some(true), + "Field 'review_score' should have range_index: true" + ); + } + "internal_sku" => { + assert_eq!( + actual_field.index, + Some(false), + "Field 'internal_sku' should have index: false" + ); + assert_eq!( + actual_field.store, + Some(false), + "Field 'internal_sku' should have store: false" + ); + } + "location" => { + assert_eq!( + actual_field.r#type, "geopoint", + "Field 'location' should have type: 'geopoint'" + ); + } + "embedding" => { + assert_eq!( + actual_field.num_dim, + Some(4), + "Field 'embedding' should have num_dim: 4" + ); + assert_eq!( + actual_field.vec_dist.as_deref(), + Some("cosine"), + "Field 'embedding' should have vec_dist: 'cosine'" + ); + } + "manufacturer" => { + assert_eq!( + actual_field.r#type, "object", + "Field 'manufacturer' should have type: 'object'" + ); + } + "tags" => { + assert_eq!( + actual_field.optional, + Some(true), + "Field 'tags' should be optional" + ); + assert_eq!( + actual_field.r#type, "string[]", + "Field 'tags' should have type 'string[]'" + ); + } + "details.part_number" => { + assert_eq!( + actual_field.facet, + Some(true), + "Flattened field 'details.part_number' should have facet: true" + ); + } + "details.weight_kg" => { + assert_eq!( + actual_field.sort, + Some(false), + "Flattened field 'details.weight_kg' should have sort: false" + ); + } + "details.desc" => { + assert!( + false, + "Flattened field 'details.desc' should have been skipped" + ); + } + + "logistics_data.warehouse_code" => { + assert_eq!(actual_field.name, "logistics_data.warehouse_code"); + } + "logistics_data.shipping_class" => { + assert_eq!(actual_field.name, "logistics_data.shipping_class"); + } + _ => { + // If we add a new field to MegaProduct, this panic will remind us to add a check for it. + if !expected_field.is_default_for_comparison() { + panic!( + "Unhandled field '{}' in test assertion. 
Please add a check.", + field_name + ); + } + } + } + } + // Add a helper trait to check if a field is just a default name/type pair + trait IsDefault { + fn is_default_for_comparison(&self) -> bool; + } + impl IsDefault for Field { + fn is_default_for_comparison(&self) -> bool { + self.facet.is_none() + && self.optional.is_none() + && self.index.is_none() + && self.store.is_none() + && self.sort.is_none() + && self.infix.is_none() + && self.locale.is_none() + && self.num_dim.is_none() + && self.vec_dist.is_none() + && self.range_index.is_none() + && self.stem.is_none() + } + } + + // Create Documents using the strongly-typed client + let typed_collection = client.collection_of::(&collection_name); + let documents_client = typed_collection.documents(); + + let mut product1 = MegaProduct { + id: "product-1".to_string(), + title: "Durable Steel Wrench".to_string(), + official_name: "The Wrenchmaster 3000+".to_string(), + brand: "MegaTools".to_string(), + price: 29.99, + review_score: 4.8, + internal_sku: Some("INTERNAL-123".to_string()), + location: (34.05, -118.24), + embedding: vec![0.1, 0.2, 0.3, 0.4], + details: ProductDetails { + part_number: "MT-WM-3000".to_string(), + weight_kg: 1.5, + desc: "A high-quality wrench for all your needs.".to_string(), + }, + logistics: Logistics { + warehouse_code: "WH-US-WEST-05".to_string(), + shipping_class: "GROUND_FREIGHT".to_string(), + }, + manufacturer: Manufacturer { + name: "MegaTools Inc.".to_string(), + city: "Toolsville".to_string(), + }, + tags: Some(vec!["steel".to_string(), "heavy-duty".to_string()]), + }; + + let create_res = documents_client.create(&product1, None).await; + assert!( + create_res.is_ok(), + "Failed to create typed document: {:?}", + create_res.err() + ); + // we set store: false for internal_sku so it should not be present in the response + product1.internal_sku = None; + assert_eq!(create_res.unwrap(), product1); + + // Retrieve Document and verify deserialization + let retrieve_res = typed_collection.document("product-1").retrieve().await; + assert!(retrieve_res.is_ok(), "Failed to retrieve typed document"); + assert_eq!(retrieve_res.unwrap(), product1); + + // Search and Filter (Testing attributes) + // A. Search a normal field + let search_res1: Result< + typesense::SearchResult, + typesense::Error, + > = documents_client + .search(SearchParameters { + q: Some("Wrench".to_string()), + query_by: Some("title".to_string()), + ..Default::default() + }) + .await; + assert_eq!(search_res1.unwrap().found, Some(1)); + + // B. Search a renamed field + let search_res2 = documents_client + .search(SearchParameters { + q: Some("Wrenchmaster".to_string()), + query_by: Some("product_name".to_string()), + ..Default::default() + }) + .await; + assert_eq!(search_res2.unwrap().found, Some(1)); + + // C. Filter by a facet + let search_params3 = SearchParameters { + q: Some("*".to_string()), + query_by: Some("title".to_string()), + filter_by: Some("brand:='MegaTools'".to_string()), + ..Default::default() + }; + let search_res3 = documents_client.search(search_params3).await; + assert_eq!(search_res3.unwrap().found, Some(1)); + + // D. Filter by a range_index + let search_params4 = SearchParameters { + q: Some("*".to_string()), + query_by: Some("title".to_string()), + filter_by: Some("review_score:>4.5".to_string()), + ..Default::default() + }; + let search_res4 = documents_client.search(search_params4).await; + assert_eq!(search_res4.unwrap().found, Some(1)); + + // E. 
Search a flattened field + let search_params5 = SearchParameters { + q: Some("MT-WM-3000".to_string()), + query_by: Some("details.part_number".to_string()), + ..Default::default() + }; + let search_res5 = documents_client.search(search_params5).await; + assert_eq!(search_res5.unwrap().found, Some(1)); + + let search_params6 = SearchParameters { + q: Some("WH-US-WEST-05".to_string()), + query_by: Some("logistics_data.warehouse_code".to_string()), + ..Default::default() + }; + let search_res6 = documents_client.search(search_params6).await; + assert_eq!( + search_res6.unwrap().found, + Some(1), + "Should find by flattened field with a custom prefix" + ); + + // Update Document (with a partial struct) + #[derive(Serialize)] + struct ProductUpdate { + price: f32, + tags: Vec, + } + let update_payload = ProductUpdate { + price: 25.99, + tags: vec!["steel".to_string(), "sale".to_string()], + }; + + let update_res = typed_collection + .document("product-1") + .update(&update_payload, None) + .await; + assert!(update_res.is_ok(), "Failed to update document"); + + // Retrieve again and check updated fields + let updated_product = typed_collection + .document("product-1") + .retrieve() + .await + .unwrap(); + assert_eq!(updated_product.price, 25.99); + assert_eq!( + updated_product.tags, + Some(vec!["steel".to_string(), "sale".to_string()]) + ); + assert_eq!(updated_product.title, product1.title); // Unchanged field + + // Delete Document + let delete_res = typed_collection.document("product-1").delete().await; + assert!(delete_res.is_ok(), "Failed to delete document"); + // Returned document should be the state before deletion + assert_eq!(delete_res.unwrap().id, "product-1"); + + // Verify Deletion + let retrieve_after_delete = typed_collection.document("product-1").retrieve().await; + assert!( + retrieve_after_delete.is_err(), + "Document should not exist after deletion" + ); +} + +// Indexing nested objects via flattening test + +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +struct ManualProductDetails { + part_number: String, + weight_kg: f32, +} + +#[derive(Typesense, Serialize, Deserialize, Debug, PartialEq, Clone)] +#[typesense( + collection_name = "manual_flat_products", + // IMPORTANT: Nested fields are disabled for this strategy. + enable_nested_fields = false +)] +struct ManualFlattenedProduct { + id: String, + title: String, + + // This field is part of the Rust struct and will be in the JSON document, + // but it will NOT be part of the Typesense schema. + #[typesense(skip)] + details: ManualProductDetails, + + // These fields represent the flattened data in the Typesense schema. + // Both `typesense(rename)` and `serde(rename)` are used to achieve the desired structure. + #[typesense(rename = "details.part_number")] + #[serde(rename = "details.part_number")] + details_part_number: String, + + #[typesense(rename = "details.weight_kg")] + #[serde(rename = "details.weight_kg")] + details_weight_kg: f32, +} + +#[tokio::test] +async fn test_manual_flattening_lifecycle() { + let client = get_client(); + let collection_name = new_id("manual_flat_test"); + + // 1. 
Create collection from the schema derived from `ManualFlattenedProduct` + let mut schema = ManualFlattenedProduct::collection_schema(); + schema.name = collection_name.clone(); + + // Verify the generated schema is correct *before* creating it + let schema_fields: Vec<_> = schema.fields.iter().map(|f| f.name.as_str()).collect(); + assert!( + !schema_fields.contains(&"details"), + "Schema should not contain the skipped 'details' field" + ); + assert!( + schema_fields.contains(&"details.part_number"), + "Schema must contain the renamed 'details.part_number' field" + ); + + let create_res = client.collections().create(schema).await; + assert!( + create_res.is_ok(), + "Failed to create collection: {:?}", + create_res.err() + ); + + let typed_collection = client.collection_of::(&collection_name); + + // 2. Create the document. Note how we populate all fields of the Rust struct. + let product = ManualFlattenedProduct { + id: "manual-1".to_string(), + title: "Portable Generator".to_string(), + details: ManualProductDetails { + part_number: "PG-123".to_string(), + weight_kg: 25.5, + }, + details_part_number: "PG-123".to_string(), + details_weight_kg: 25.5, + }; + + let create_res = typed_collection.documents().create(&product, None).await; + assert!( + create_res.is_ok(), + "Failed to create document with manual flattening" + ); + + // The created document in the response should be equal to our input struct. + assert_eq!(create_res.unwrap(), product); + + // 3. Retrieve and verify the document. + let retrieved_product = typed_collection + .document("manual-1") + .retrieve() + .await + .unwrap(); + assert_eq!(retrieved_product, product); + // Crucially, we can access the nested struct for display purposes, even though it wasn't indexed. + assert_eq!(retrieved_product.details.part_number, "PG-123"); + + // 4. Search using the flattened (and indexed) field. 
+ let search_res_indexed = typed_collection + .documents() + .search(SearchParameters { + q: Some("PG-123".to_string()), + query_by: Some("details.part_number".to_string()), + ..Default::default() + }) + .await + .unwrap(); + assert_eq!( + search_res_indexed.found, + Some(1), + "Should find document by indexed flattened field" + ); +} diff --git a/typesense/tests/client/mod.rs b/typesense/tests/client/mod.rs index 078819b..d32f739 100644 --- a/typesense/tests/client/mod.rs +++ b/typesense/tests/client/mod.rs @@ -1,16 +1,17 @@ -pub mod aliases_test; -pub mod analytics_test; -pub mod client_test; -pub mod collections_test; -pub mod conversation_models_test; -pub mod documents_test; -pub mod keys_test; -pub mod multi_search_test; -pub mod presets_test; -pub mod search_overrides_test; -pub mod stemming_dictionaries_test; -pub mod stopwords_test; -pub mod synonyms_test; +mod aliases_test; +mod analytics_test; +mod client_test; +mod collections_test; +mod conversation_models_test; +mod derive_integration_test; +mod documents_test; +mod keys_test; +mod multi_search_test; +mod presets_test; +mod search_overrides_test; +mod stemming_dictionaries_test; +mod stopwords_test; +mod synonyms_test; use reqwest::Url; use reqwest_retry::policies::ExponentialBackoff; @@ -24,9 +25,9 @@ pub fn get_client() -> Client { nodes: vec![Url::parse("http://localhost:8108").unwrap()], nearest_node: None, api_key: "xyz".to_string(), - healthcheck_interval: Duration::from_secs(60), - retry_policy: ExponentialBackoff::builder().build_with_max_retries(3), - connection_timeout: Duration::from_secs(10), + healthcheck_interval: Duration::from_secs(5), + retry_policy: ExponentialBackoff::builder().build_with_max_retries(1), + connection_timeout: Duration::from_secs(3), }; Client::new(config).unwrap() } diff --git a/typesense/tests/derive/collection.rs b/typesense/tests/derive/collection.rs deleted file mode 100644 index 205af62..0000000 --- a/typesense/tests/derive/collection.rs +++ /dev/null @@ -1,53 +0,0 @@ -use serde::{Deserialize, Serialize}; -use typesense::document::Document; -use typesense::Typesense; - -#[test] -fn derived_document_generates_schema() { - let schema = Company::collection_schema(); - - let expected = serde_json::json!( - { - "name": "companies", - "fields": [ - { - "name" : "company_name", - "type" : "string" - }, - { - "name" : "num_employees", - "type" : "int32" - }, - { - "name" : "country", - "type" : "string", - "facet" : true - }, - { - "name" : "keywords", - "type" : "string[]", - "optional" : true - } - ], - "default_sorting_field": "num_employees", - "enable_nested_fields": true - } - ); - - assert_eq!(serde_json::to_value(&schema).unwrap(), expected) -} - -#[allow(dead_code)] -#[derive(Typesense, Serialize, Deserialize)] -#[typesense( - collection_name = "companies", - default_sorting_field = "num_employees", - enable_nested_fields = true -)] -struct Company { - company_name: String, - num_employees: i32, - #[typesense(facet)] - country: String, - keywords: Option>, -} diff --git a/typesense/tests/derive/derive_collection_schema_test.rs b/typesense/tests/derive/derive_collection_schema_test.rs new file mode 100644 index 0000000..fedb8fc --- /dev/null +++ b/typesense/tests/derive/derive_collection_schema_test.rs @@ -0,0 +1,303 @@ +use std::collections::BTreeMap; + +use serde::{Deserialize, Serialize}; +use serde_json::json; +use typesense::document::Document; +use typesense::Typesense; + +// Helper to convert schema to BTreeMap for order-independent comparison +fn schema_to_map( + schema: 
&typesense::collection_schema::CollectionSchema, +) -> BTreeMap { + serde_json::from_value(serde_json::to_value(schema).unwrap()).unwrap() +} + +// Test 1: Basic Schema Generation + +#[allow(dead_code)] +#[derive(Typesense, Serialize, Deserialize)] +#[typesense( + collection_name = "companies", + default_sorting_field = "num_employees", + enable_nested_fields = true +)] +struct Company { + company_name: String, + num_employees: i32, + #[typesense(facet)] + country: String, + keywords: Option>, +} + +#[test] +fn derived_document_generates_basic_schema() { + let schema = Company::collection_schema(); + + let expected = json!({ + "name": "companies", + "fields": [ + { + "name": "company_name", + "type": "string" + }, + { + "name": "num_employees", + "type": "int32" + }, + { + "name": "country", + "type": "string", + "facet": true + }, + { + "name": "keywords", + "type": "string[]", + "optional": true + } + ], + "default_sorting_field": "num_employees", + "enable_nested_fields": true + }); + + assert_eq!(serde_json::to_value(&schema).unwrap(), expected); +} + +// Test 2: All Field-Level and Collection-Level Attributes + +type GeoPoint = (f32, f32); + +#[allow(dead_code)] +#[derive(Typesense, Serialize, Deserialize)] +#[typesense( + collection_name = "kitchen_sink_products", + default_sorting_field = "price", + token_separators = ["-", "/"], + symbols_to_index = ["+"] +)] +struct KitchenSinkProduct { + // Basic types and rename + #[typesense(rename = "product_name")] + name: String, + #[typesense(sort = false)] + price: f32, + + // Booleans for index, store, stem, infix, range_index + #[typesense(index = false, store = false)] + internal_id: u64, + #[typesense(stem = true, infix = true)] + description: String, + #[typesense(range_index = true)] + review_score: f32, + + // Facet and explicit optional + #[typesense(facet = true, optional = true)] + brand: String, + + // Locale and type override + #[typesense(locale = "ja")] + description_jp: String, + #[typesense(type = "geopoint")] + location: GeoPoint, + + // Vector search attributes + #[typesense(num_dim = 256, vec_dist = "cosine")] + image_embedding: Vec, + + // Auto type + #[typesense(type = "auto")] + misc_data: String, +} + +#[test] +fn derived_document_handles_all_attributes() { + let schema = KitchenSinkProduct::collection_schema(); + + let expected = json!({ + "name": "kitchen_sink_products", + "fields": [ + { "name": "product_name", "type": "string" }, + { "name": "price", "type": "float", "sort": false }, + { "name": "internal_id", "type": "int64", "index": false, "store": false }, + { "name": "description", "type": "string", "stem": true, "infix": true }, + { "name": "review_score", "type": "float", "range_index": true }, + { "name": "brand", "type": "string", "facet": true, "optional": true }, + { "name": "description_jp", "type": "string", "locale": "ja" }, + { "name": "location", "type": "geopoint" }, + { "name": "image_embedding", "type": "float[]", "num_dim": 256, "vec_dist": "cosine" }, + { "name": "misc_data", "type": "auto" } + ], + "default_sorting_field": "price", + "token_separators": ["-", "/"], + "symbols_to_index": ["+"] + }); + + assert_eq!(serde_json::to_value(&schema).unwrap(), expected); +} + +// Test 3: Nested Objects, Flattening, and Cherry-Picking + +#[derive(Typesense, Serialize, Deserialize)] +struct Address { + line_1: String, + city: String, +} + +#[derive(Typesense, Serialize, Deserialize)] +struct Profile { + #[typesense(facet = true)] + name: String, + email: Option, +} + +#[allow(dead_code)] 
+#[derive(Typesense, Serialize, Deserialize)]
+#[typesense(collection_name = "nested_users", enable_nested_fields = true)]
+struct User {
+    // --- Strategy 1: Indexing as an object ---
+    primary_address: Address,
+    work_addresses: Vec<Address>,
+    optional_profile: Option<Profile>,
+
+    // --- Strategy 2: Flattening ---
+    #[typesense(flatten)]
+    profile: Profile,
+    #[typesense(flatten)]
+    previous_addresses: Vec<Address>,
+
+    // --- Strategy 3: manually flattened object ---
+    #[typesense(rename = "primary_address.city")]
+    primary_city: String,
+    #[typesense(rename = "work_addresses.zip", type = "string[]")]
+    work_zips: Vec<String>,
+}
+
+#[test]
+fn derived_document_handles_nested_and_flattened_fields() {
+    let schema = User::collection_schema();
+
+    let expected = json!({
+        "name": "nested_users",
+        "enable_nested_fields": true,
+        "fields": [
+            // --- Strategy 1: Object Indexing ---
+            { "name": "primary_address", "type": "object" },
+            { "name": "work_addresses", "type": "object[]" },
+            { "name": "optional_profile", "type": "object", "optional": true },
+
+            // --- Strategy 2: Flattened fields ---
+            { "name": "profile.name", "type": "string", "facet": true },
+            { "name": "profile.email", "type": "string", "optional": true },
+            { "name": "previous_addresses.line_1", "type": "string[]" },
+            { "name": "previous_addresses.city", "type": "string[]" },
+
+            // --- Strategy 3: manually flattened object ---
+            { "name": "primary_address.city", "type": "string" },
+            { "name": "work_addresses.zip", "type": "string[]" }
+        ]
+    });
+
+    // Using BTreeMap to allow comparing JSON without worrying about field order
+    let expected_map: std::collections::BTreeMap<String, serde_json::Value> =
+        serde_json::from_value(expected).unwrap();
+    let actual_map: std::collections::BTreeMap<String, serde_json::Value> =
+        serde_json::from_value(serde_json::to_value(&schema).unwrap()).unwrap();
+
+    assert_eq!(actual_map, expected_map);
+}
+
+// Test 4: All Boolean Shorthand Attributes
+
+#[allow(dead_code)]
+#[derive(Typesense, Serialize, Deserialize)]
+#[typesense(collection_name = "shorthand_products")]
+struct ShorthandProduct {
+    // Shorthand for facet = true
+    #[typesense(facet)]
+    brand: String,
+
+    // Shorthand for sort = true
+    #[typesense(sort)]
+    name: String,
+
+    // Shorthand for index = true
+    #[typesense(index)]
+    category: String,
+
+    // Shorthand for store = true
+    #[typesense(store)]
+    description: String,
+
+    // Shorthand for infix = true
+    #[typesense(infix)]
+    tags: String,
+
+    // Shorthand for stem = true
+    #[typesense(stem)]
+    title: String,
+
+    // Shorthand for range_index = true
+    #[typesense(range_index)]
+    price: f32,
+
+    // Shorthand for optional = true, overriding the non-Option type
+    #[typesense(optional)]
+    variant: String,
+}
+
+#[test]
+fn derived_document_handles_boolean_shorthand() {
+    let schema = ShorthandProduct::collection_schema();
+
+    let expected = json!({
+        "name": "shorthand_products",
+        "fields": [
+            { "name": "brand", "type": "string", "facet": true },
+            { "name": "name", "type": "string", "sort": true },
+            { "name": "category", "type": "string", "index": true },
+            { "name": "description", "type": "string", "store": true },
+            { "name": "tags", "type": "string", "infix": true },
+            { "name": "title", "type": "string", "stem": true },
+            { "name": "price", "type": "float", "range_index": true },
+            { "name": "variant", "type": "string", "optional": true }
+        ]
+    });
+
+    // Using BTreeMap to allow comparing JSON without worrying about field order
+    let expected_map: std::collections::BTreeMap<String, serde_json::Value> =
+        serde_json::from_value(expected).unwrap();
+    let actual_map: std::collections::BTreeMap<String, serde_json::Value> =
+        serde_json::from_value(serde_json::to_value(&schema).unwrap()).unwrap();
+
+    assert_eq!(actual_map, expected_map);
+}
+
+#[allow(dead_code)]
+#[derive(Typesense, Serialize, Deserialize)]
+#[typesense(collection_name = "skipped_field_products")]
+struct SkippedFieldProduct {
+    // This field will be in the schema
+    product_id: String,
+
+    // This field is for internal Rust logic
only and should NOT be in the schema + #[typesense(skip)] + internal_metadata: String, + + // This field will also be in the schema + price: i32, +} + +#[test] +fn derived_document_handles_skipped_field() { + let schema = SkippedFieldProduct::collection_schema(); + let expected_map: BTreeMap = serde_json::from_value(json!({ + "name": "skipped_field_products", + "fields": [ + { "name": "product_id", "type": "string" }, + { "name": "price", "type": "int32" } + // Note: `internal_metadata` is correctly omitted from the fields array + ] + })) + .unwrap(); + + assert_eq!(schema_to_map(&schema), expected_map); +} diff --git a/typesense/tests/derive/lib.rs b/typesense/tests/derive/lib.rs index 8a97463..e0561b0 100644 --- a/typesense/tests/derive/lib.rs +++ b/typesense/tests/derive/lib.rs @@ -2,4 +2,4 @@ mod compile; #[cfg(feature = "typesense_derive")] -mod collection; +mod derive_collection_schema_test; diff --git a/typesense/tests/derive/ui/duplicated_attribute.stderr b/typesense/tests/derive/ui/duplicated_attribute.stderr index 454650a..a9d2519 100644 --- a/typesense/tests/derive/ui/duplicated_attribute.stderr +++ b/typesense/tests/derive/ui/duplicated_attribute.stderr @@ -1,7 +1,11 @@ -error: #[typesense(facet)] repeated more than one time. - --> $DIR/duplicated_attribute.rs:7:5 +error: Original `#[typesense(...)]` attribute here. + --> tests/derive/ui/duplicated_attribute.rs:7:5 | -7 | / #[typesense(facet)] -8 | | #[typesense(facet)] -9 | | country_code: String, - | |________________________^ +7 | #[typesense(facet)] + | ^^^^^^^^^^^^^^^^^^^ + +error: Attribute is duplicated here. + --> tests/derive/ui/duplicated_attribute.rs:8:5 + | +8 | #[typesense(facet)] + | ^^^^^^^^^^^^^^^^^^^ diff --git a/typesense/tests/derive/ui/unknown_attribute.stderr b/typesense/tests/derive/ui/unknown_attribute.stderr index 385b665..0d5ba83 100644 --- a/typesense/tests/derive/ui/unknown_attribute.stderr +++ b/typesense/tests/derive/ui/unknown_attribute.stderr @@ -1,5 +1,5 @@ -error: Unexpected token facets. Did you mean `facet`? 
- --> $DIR/unknown_attribute.rs:8:17
+error: Unexpected field attribute "facets"
+ --> tests/derive/ui/unknown_attribute.rs:8:17
   |
 8 | #[typesense(facets)]
   | ^^^^^^
diff --git a/typesense_derive/src/field_attrs.rs b/typesense_derive/src/field_attrs.rs
new file mode 100644
index 0000000..b6d9720
--- /dev/null
+++ b/typesense_derive/src/field_attrs.rs
@@ -0,0 +1,413 @@
+use crate::{skip_eq, string_literal};
+use proc_macro2::TokenTree;
+use quote::quote;
+use syn::{spanned::Spanned, Attribute, Field};
+
+#[derive(Default)]
+struct FieldAttrs {
+    type_override: Option<String>,
+    facet: Option<bool>,
+    index: Option<bool>,
+    locale: Option<String>,
+    sort: Option<bool>,
+    infix: Option<bool>,
+    num_dim: Option<i32>,
+    optional: Option<bool>,
+    store: Option<bool>,
+    stem: Option<bool>,
+    range_index: Option<bool>,
+    vec_dist: Option<String>,
+    flatten: bool,
+    rename: Option<String>,
+    skip: bool,
+}
+// Helper to parse a boolean literal (true/false)
+fn bool_literal(tt_iter: &mut impl Iterator<Item = TokenTree>) -> syn::Result<bool> {
+    match tt_iter.next() {
+        Some(TokenTree::Ident(i)) => {
+            if i == "true" {
+                Ok(true)
+            } else if i == "false" {
+                Ok(false)
+            } else {
+                Err(syn::Error::new_spanned(
+                    i,
+                    "Expected a boolean `true` or `false`",
+                ))
+            }
+        }
+        tt => Err(syn::Error::new(tt.span(), "Expected a boolean literal")),
+    }
+}
+
+// Helper to parse an integer literal
+fn i32_literal(tt_iter: &mut impl Iterator<Item = TokenTree>) -> syn::Result<i32> {
+    match tt_iter.next() {
+        Some(TokenTree::Literal(l)) => {
+            let lit = syn::Lit::new(l);
+            if let syn::Lit::Int(i) = lit {
+                i.base10_parse::<i32>()
+            } else {
+                Err(syn::Error::new_spanned(
+                    lit,
+                    "it must be equal to an integer literal",
+                ))
+            }
+        }
+        tt => Err(syn::Error::new(tt.span(), "Expected an integer literal")),
+    }
+}
+
+// This function will parse #[typesense(...)] on a FIELD
+fn extract_field_attrs(attrs: &[Attribute]) -> syn::Result<FieldAttrs> {
+    let mut res = FieldAttrs::default();
+
+    // Find the single #[typesense] attribute, erroring if there are more than one.
+    let all_ts_attrs: Vec<&Attribute> = attrs
+        .iter()
+        .filter(|a| a.path.get_ident().map_or(false, |i| i == "typesense"))
+        .collect();
+
+    // Check for duplicates and create a rich, multi-span error if found
+    if all_ts_attrs.len() > 1 {
+        // Create the first error pointing to the original attribute
+        let mut err = syn::Error::new_spanned(
+            all_ts_attrs[0],
+            "Original `#[typesense(...)]` attribute here.",
+        );
+
+        // Combine it with a second error pointing to the duplicate
+        err.combine(syn::Error::new_spanned(
+            all_ts_attrs[1],
+            "Attribute is duplicated here.",
+        ));
+
+        return Err(err);
+    }
+    // Get the single attribute, or return default if none exist
+
+    let attr = if let Some(a) = all_ts_attrs.first() {
+        *a
+    } else {
+        return Ok(res); // No typesense attribute, return default
+    };
+
+    if let Some(TokenTree::Group(g)) = attr.tokens.clone().into_iter().next() {
+        let mut tt_iter = g.stream().into_iter().peekable();
+        while let Some(tt) = tt_iter.next() {
+            if let TokenTree::Ident(i) = tt {
+                let is_shorthand =
+                    tt_iter.peek().is_none() || tt_iter.peek().unwrap().to_string() == ",";
+                let ident_str = i.to_string();
+
+                match ident_str.as_str() {
+                    // --- Boolean flags that support shorthand and key-value ---
+                    "facet" | "sort" | "index" | "store" | "infix" | "stem" | "range_index"
+                    | "optional" => {
+                        let value = if is_shorthand {
+                            true
+                        } else {
+                            skip_eq(&i, &mut tt_iter)?;
+                            bool_literal(&mut tt_iter)?
+ }; + + // Set the correct field on the result struct, checking for duplicates + match ident_str.as_str() { + "facet" => { + if res.facet.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `facet` is duplicated", + )); + } + res.facet = Some(value); + } + "sort" => { + if res.sort.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `sort` is duplicated", + )); + } + res.sort = Some(value); + } + "index" => { + if res.index.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `index` is duplicated", + )); + } + res.index = Some(value); + } + "store" => { + if res.store.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `store` is duplicated", + )); + } + res.store = Some(value); + } + "infix" => { + if res.infix.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `infix` is duplicated", + )); + } + res.infix = Some(value); + } + "stem" => { + if res.stem.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `stem` is duplicated", + )); + } + res.stem = Some(value); + } + "range_index" => { + if res.range_index.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `range_index` is duplicated", + )); + } + res.range_index = Some(value); + } + "optional" => { + if res.optional.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `optional` is duplicated", + )); + } + res.optional = Some(value); + } + _ => unreachable!(), + } + } + // --- Flags that are ONLY shorthand --- + "flatten" | "skip" => { + if !is_shorthand { + return Err(syn::Error::new(i.span(), format!("`{}` is a flag and does not take a value. Use `#[typesense({})]`", ident_str, ident_str))); + } + match ident_str.as_str() { + "flatten" => { + if res.flatten { + return Err(syn::Error::new_spanned( + &i, + "Attribute `flatten` is duplicated", + )); + } + res.flatten = true; + } + "skip" => { + // Add this block + if res.skip { + return Err(syn::Error::new_spanned( + &i, + "Attribute `skip` is duplicated", + )); + } + res.skip = true; + } + _ => unreachable!(), + } + } + + // --- Key-value only attributes --- + "rename" => { + skip_eq(&i, &mut tt_iter)?; + if res.rename.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `rename` is duplicated", + )); + } + res.rename = Some(string_literal(&mut tt_iter)?); + } + "locale" => { + skip_eq(&i, &mut tt_iter)?; + if res.locale.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `locale` is duplicated", + )); + } + res.locale = Some(string_literal(&mut tt_iter)?); + } + "vec_dist" => { + skip_eq(&i, &mut tt_iter)?; + if res.vec_dist.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `vec_dist` is duplicated", + )); + } + res.vec_dist = Some(string_literal(&mut tt_iter)?); + } + "type" => { + skip_eq(&i, &mut tt_iter)?; + if res.type_override.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `type` is duplicated", + )); + } + res.type_override = Some(string_literal(&mut tt_iter)?); + } + "num_dim" => { + skip_eq(&i, &mut tt_iter)?; + if res.num_dim.is_some() { + return Err(syn::Error::new_spanned( + &i, + "Attribute `num_dim` is duplicated", + )); + } + res.num_dim = Some(i32_literal(&mut tt_iter)?); + } + // --- Error for unknown attributes --- + v => { + return Err(syn::Error::new( + i.span(), + format!("Unexpected field attribute \"{}\"", v), + )); + } + } + }; + + if let Some(TokenTree::Punct(p)) = tt_iter.peek() { + if p.as_char() == ',' { + tt_iter.next(); // Consume the comma + } + } 
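
For reviewers skimming this parser, a sketch of the attribute forms the match arms above (together with the `extract_attrs`/`string_list` helpers later in this patch) are written to accept: boolean options in shorthand or `key = value` form, `flatten`/`skip` as bare flags, and key-value-only options such as `rename`, `locale`, `type`, `vec_dist` and `num_dim`. The attribute and option names come from this patch; the `Product`/`Dimensions` types and every value are invented for illustration, so treat this as a sketch rather than documented usage.

```rust
use serde::{Deserialize, Serialize};
use typesense::Typesense;

// Nested type used below with `flatten`; per `process_field`, its fields end up in the
// parent schema prefixed with the outer field's name (e.g. `dimensions.width`).
#[derive(Typesense, Serialize, Deserialize)]
struct Dimensions {
    width: i32,
    height: i32,
}

#[derive(Typesense, Serialize, Deserialize)]
#[typesense(
    collection_name = "products",
    symbols_to_index = ["+"],
    token_separators = ["-"]
)]
struct Product {
    // Shorthand boolean flag, equivalent to `facet = true`.
    #[typesense(facet)]
    category: String,

    // Explicit `key = value` form for boolean options.
    #[typesense(sort = true, infix = false)]
    title: String,

    // Key-value-only options: rename the schema field and override its type.
    #[typesense(rename = "price_usd", type = "float")]
    price: f32,

    // Vector field options.
    #[typesense(num_dim = 256, vec_dist = "cosine")]
    embedding: Vec<f32>,

    // Bare flags: `flatten` expands a nested Document, `skip` omits the field entirely.
    #[typesense(flatten)]
    dimensions: Dimensions,
    #[typesense(skip)]
    internal_notes: String,
}
```

Per the flatten branch of `process_field` below, the nested field names are prefixed with the outer field's name (or its `rename` value), and `[]` is appended to the type when the wrapper is a `Vec`.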
+ } + } + + Ok(res) +} + +// Get the inner type for a given wrappper +fn ty_inner_type<'a>(ty: &'a syn::Type, wrapper: &'static str) -> Option<&'a syn::Type> { + if let syn::Type::Path(ref p) = ty { + if p.path.segments.len() == 1 && p.path.segments[0].ident == wrapper { + if let syn::PathArguments::AngleBracketed(ref inner_ty) = p.path.segments[0].arguments { + if inner_ty.args.len() == 1 { + // len is 1 so this should not fail + let inner_ty = inner_ty.args.first().unwrap(); + if let syn::GenericArgument::Type(ref t) = inner_ty { + return Some(t); + } + } + } + } + } + None +} + +/// Helper to get the inner-most type from nested Option/Vec wrappers. +fn get_inner_type(mut ty: &syn::Type) -> &syn::Type { + while let Some(inner) = ty_inner_type(ty, "Option").or_else(|| ty_inner_type(ty, "Vec")) { + ty = inner; + } + ty +} + +/// Processes a single struct field. +/// Returns a TokenStream which evaluates to a `Vec`. +pub fn process_field(field: &Field) -> syn::Result { + let field_attrs = extract_field_attrs(&field.attrs)?; + + // If the `skip` attribute is present, produce an empty Vec of fields, + // effectively removing it from the schema. + if field_attrs.skip { + return Ok(quote! { + vec![] + }); + } + + let name_ident = &field.ident; + + if field_attrs.flatten { + // --- FLATTEN LOGIC --- + + // Determine the prefix: use the rename value if it exists, otherwise use the field's name. + let prefix = if let Some(rename_prefix) = &field_attrs.rename { + quote! { #rename_prefix } + } else { + let name_ident = &field.ident; + quote! { stringify!(#name_ident) } + }; + + let inner_type = get_inner_type(&field.ty); + let is_vec = ty_inner_type(&field.ty, "Vec").is_some() + || ty_inner_type(&field.ty, "Option") + .map_or(false, |t| ty_inner_type(t, "Vec").is_some()); + + Ok(quote! { + { + <#inner_type as typesense::document::Document>::collection_schema().fields + .into_iter() + .map(|mut f| { + // Use the dynamically determined prefix here + f.name = format!("{}.{}", #prefix, f.name); + if #is_vec && !f.r#type.ends_with("[]") { + f.r#type.push_str("[]"); + } + f + }) + .collect::>() + } + }) + } else { + // --- REGULAR FIELD LOGIC --- + let (ty, is_option_type) = if let Some(inner_ty) = ty_inner_type(&field.ty, "Option") { + (inner_ty, true) + } else { + (&field.ty, false) + }; + + let field_name = if let Some(rename) = &field_attrs.rename { + quote! { #rename.to_string() } + } else { + quote! { std::string::String::from(stringify!(#name_ident)) } + }; + + let typesense_field_type = if let Some(override_str) = &field_attrs.type_override { + quote! { #override_str.to_owned() } + } else { + quote! 
{ <#ty as typesense::field::ToTypesenseField>::to_typesense_type().to_owned() } + }; + + let optional = field_attrs + .optional + .or(if is_option_type { Some(true) } else { None }) + .map(|v| quote!(.optional(Some(#v)))); + let facet = field_attrs.facet.map(|v| quote!(.facet(Some(#v)))); + let index = field_attrs.index.map(|v| quote!(.index(Some(#v)))); + let store = field_attrs.store.map(|v| quote!(.store(Some(#v)))); + let sort = field_attrs.sort.map(|v| quote!(.sort(Some(#v)))); + let infix = field_attrs.infix.map(|v| quote!(.infix(Some(#v)))); + let stem = field_attrs.stem.map(|v| quote!(.stem(Some(#v)))); + let range_index = field_attrs + .range_index + .map(|v| quote!(.range_index(Some(#v)))); + let locale = field_attrs + .locale + .map(|v| quote!(.locale(Some(#v.to_string())))); + let vec_dist = field_attrs + .vec_dist + .map(|v| quote!(.vec_dist(Some(#v.to_string())))); + let num_dim = field_attrs.num_dim.map(|v| quote!(.num_dim(Some(#v)))); + + Ok(quote! { + vec![ + typesense::field::FieldBuilder::new(#field_name, #typesense_field_type) + #optional #facet #index #store #sort #infix #stem #range_index #locale #vec_dist #num_dim + .build() + ] + }) + } +} diff --git a/typesense_derive/src/lib.rs b/typesense_derive/src/lib.rs index 009cda0..6810551 100644 --- a/typesense_derive/src/lib.rs +++ b/typesense_derive/src/lib.rs @@ -1,7 +1,9 @@ use proc_macro::TokenStream; use proc_macro2::{Ident, TokenTree}; use quote::{quote, ToTokens}; -use syn::{spanned::Spanned, Attribute, Field, ItemStruct}; +use syn::{spanned::Spanned, Attribute, ItemStruct}; +mod field_attrs; +use field_attrs::process_field; #[proc_macro_derive(Typesense, attributes(typesense))] pub fn typesense_collection_derive(input: TokenStream) -> TokenStream { @@ -40,6 +42,8 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { collection_name, default_sorting_field, enable_nested_fields, + symbols_to_index, + token_separators, } = extract_attrs(attrs)?; let collection_name = collection_name.unwrap_or_else(|| ident.to_string().to_lowercase()); @@ -58,9 +62,10 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { } } + // Use flat_map to handle fields that expand into multiple schema fields let typesense_fields = fields .iter() - .map(to_typesense_field_type) + .map(|field| process_field(field)) // process_field returns a Result .collect::>>()?; let default_sorting_field = if let Some(v) = default_sorting_field { @@ -79,16 +84,38 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { proc_macro2::TokenStream::new() }; + let symbols_to_index = if let Some(v) = symbols_to_index { + quote! { + builder = builder.symbols_to_index(vec![#(#v.to_string()),*]); + } + } else { + proc_macro2::TokenStream::new() + }; + + let token_separators = if let Some(v) = token_separators { + quote! { + builder = builder.token_separators(vec![#(#v.to_string()),*]); + } + } else { + proc_macro2::TokenStream::new() + }; + let gen = quote! 
{ impl #impl_generics typesense::document::Document for #ident #ty_generics #where_clause { fn collection_schema() -> typesense::collection_schema::CollectionSchema { let name = #collection_name.to_owned(); - let fields = vec![#(#typesense_fields,)*]; + + // Collect fields from all sources + let fields: Vec = vec![ + #(#typesense_fields,)* + ].into_iter().flatten().collect(); let mut builder = typesense::collection_schema::CollectionSchemaBuilder::new(name, fields); #default_sorting_field #enable_nested_fields + #token_separators + #symbols_to_index builder.build() } @@ -97,24 +124,6 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { Ok(gen.into()) } -// Get the inner type for a given wrappper -fn ty_inner_type<'a>(ty: &'a syn::Type, wrapper: &'static str) -> Option<&'a syn::Type> { - if let syn::Type::Path(ref p) = ty { - if p.path.segments.len() == 1 && p.path.segments[0].ident == wrapper { - if let syn::PathArguments::AngleBracketed(ref inner_ty) = p.path.segments[0].arguments { - if inner_ty.args.len() == 1 { - // len is 1 so this should not fail - let inner_ty = inner_ty.args.first().unwrap(); - if let syn::GenericArgument::Type(ref t) = inner_ty { - return Some(t); - } - } - } - } - } - None -} - // Add a bound `T: ToTypesenseField` to every type parameter T. fn add_trait_bounds(mut generics: syn::Generics) -> syn::Generics { for param in &mut generics.params { @@ -131,10 +140,12 @@ fn add_trait_bounds(mut generics: syn::Generics) -> syn::Generics { struct Attrs { collection_name: Option, default_sorting_field: Option, + symbols_to_index: Option>, enable_nested_fields: Option, + token_separators: Option>, } -fn skip_eq(i: Ident, tt_iter: &mut impl Iterator) -> syn::Result<()> { +fn skip_eq(i: &Ident, tt_iter: &mut impl Iterator) -> syn::Result<()> { match tt_iter.next() { Some(TokenTree::Punct(p)) if p.as_char() == '=' => Ok(()), Some(tt) => Err(syn::Error::new_spanned( @@ -183,15 +194,15 @@ fn extract_attrs(attrs: Vec) -> syn::Result { if let TokenTree::Ident(i) = tt { match &i.to_string() as &str { "collection_name" => { - skip_eq(i, &mut tt_iter)?; + skip_eq(&i, &mut tt_iter)?; res.collection_name = Some(string_literal(&mut tt_iter)?); } "default_sorting_field" => { - skip_eq(i, &mut tt_iter)?; + skip_eq(&i, &mut tt_iter)?; res.default_sorting_field = Some(string_literal(&mut tt_iter)?); } "enable_nested_fields" => { - skip_eq(i, &mut tt_iter)?; + skip_eq(&i, &mut tt_iter)?; let val = match tt_iter.next() { Some(TokenTree::Ident(i)) => &i.to_string() == "true", tt => { @@ -203,6 +214,14 @@ fn extract_attrs(attrs: Vec) -> syn::Result { }; res.enable_nested_fields = Some(val); } + "symbols_to_index" => { + skip_eq(&i, &mut tt_iter)?; + res.symbols_to_index = Some(string_list(&mut tt_iter)?); + } + "token_separators" => { + skip_eq(&i, &mut tt_iter)?; + res.token_separators = Some(string_list(&mut tt_iter)?); + } v => { return Err(syn::Error::new(i.span(), format!("Unexpected \"{}\"", v))); } @@ -223,78 +242,46 @@ fn extract_attrs(attrs: Vec) -> syn::Result { Ok(res) } -/// Convert a given field in a typesense field type. 
-fn to_typesense_field_type(field: &Field) -> syn::Result { - let name = &field.ident; +// Helper function to parse a bracketed list of string literals +fn string_list(tt_iter: &mut impl Iterator) -> syn::Result> { + let group = match tt_iter.next() { + Some(TokenTree::Group(g)) if g.delimiter() == proc_macro2::Delimiter::Bracket => g, + Some(tt) => { + return Err(syn::Error::new_spanned( + tt, + "Expected a list in brackets `[]`", + )) + } + None => { + return Err(syn::Error::new( + proc_macro2::Span::call_site(), + "Expected a list in brackets `[]`", + )) + } + }; - let facet = { - let facet_vec = field - .attrs - .iter() - .filter_map(|attr| { - if attr.path.segments.len() == 1 && attr.path.segments[0].ident == "typesense" { - if let Some(proc_macro2::TokenTree::Group(g)) = - attr.tokens.clone().into_iter().next() - { - let mut tokens = g.stream().into_iter(); - match tokens.next() { - Some(proc_macro2::TokenTree::Ident(ref i)) => { - if i != "facet" { - return Some(Err(syn::Error::new_spanned( - i, - format!("Unexpected token {}. Did you mean `facet`?", i), - ))); - } - } - Some(ref tt) => { - return Some(Err(syn::Error::new_spanned( - tt, - format!("Unexpected token {}. Did you mean `facet`?", tt), - ))) - } - None => { - return Some(Err(syn::Error::new_spanned(attr, "expected `facet`"))) - } - } + let mut result = Vec::new(); + let mut inner_iter = group.stream().into_iter().peekable(); - if let Some(ref tt) = tokens.next() { - return Some(Err(syn::Error::new_spanned( - tt, - "Unexpected token. Expected )", - ))); - } - return Some(Ok(())); - } - } - None - }) - .collect::>>()?; - let facet_count = facet_vec.len(); - if facet_count == 1 { - quote!(Some(true)) - } else if facet_count == 0 { - quote!(None) + while let Some(tt) = inner_iter.next() { + if let TokenTree::Literal(l) = tt { + let lit = syn::Lit::new(l); + if let syn::Lit::Str(s) = lit { + result.push(s.value()); + } else { + return Err(syn::Error::new_spanned(lit, "Expected a string literal")); + } } else { - return Err(syn::Error::new_spanned( - field, - "#[typesense(facet)] repeated more than one time.", - )); + return Err(syn::Error::new_spanned(tt, "Expected a string literal")); } - }; - let (ty, optional) = if let Some(inner_ty) = ty_inner_type(&field.ty, "Option") { - (inner_ty, quote!(Some(true))) - } else { - (&field.ty, quote!(None)) - }; - let typesense_field_type = quote!( - <#ty as typesense::field::ToTypesenseField>::to_typesense_type().to_owned() - ); + // Check for a trailing comma + if let Some(TokenTree::Punct(p)) = inner_iter.peek() { + if p.as_char() == ',' { + inner_iter.next(); // Consume the comma + } + } + } - Ok(quote! 
{ - typesense::field::FieldBuilder::new(std::string::String::from(stringify!(#name)), #typesense_field_type) - .optional(#optional) - .facet(#facet) - .build() - }) + Ok(result) } From 3bc15f76d2fc317939cecfbabb0255b959c8f209 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Fri, 15 Aug 2025 11:04:25 +0700 Subject: [PATCH 11/21] feat: add builders for collection schema and collection fields using bon; restructure library layout --- typesense/Cargo.toml | 3 + typesense/src/builders/collection_field.rs | 54 +++++++ typesense/src/builders/collection_schema.rs | 137 ++++++++++++++++ typesense/src/builders/mod.rs | 6 + typesense/src/client/multi_search.rs | 2 +- typesense/src/collection_schema.rs | 136 ---------------- typesense/src/error.rs | 2 + typesense/src/field/mod.rs | 147 ------------------ typesense/src/lib.rs | 14 +- typesense/src/prelude.rs | 20 +-- typesense/src/{ => traits}/document.rs | 0 typesense/src/{field => traits}/field_type.rs | 6 +- typesense/src/traits/mod.rs | 9 ++ typesense/src/traits/multi_search_ext.rs | 18 +++ .../tests/client/derive_integration_test.rs | 3 +- .../derive/derive_collection_schema_test.rs | 4 +- typesense_derive/src/field_attrs.rs | 36 ++--- typesense_derive/src/lib.rs | 19 +-- 18 files changed, 271 insertions(+), 345 deletions(-) create mode 100644 typesense/src/builders/collection_field.rs create mode 100644 typesense/src/builders/collection_schema.rs create mode 100644 typesense/src/builders/mod.rs delete mode 100644 typesense/src/collection_schema.rs delete mode 100644 typesense/src/field/mod.rs rename typesense/src/{ => traits}/document.rs (100%) rename typesense/src/{field => traits}/field_type.rs (95%) create mode 100644 typesense/src/traits/mod.rs create mode 100644 typesense/src/traits/multi_search_ext.rs diff --git a/typesense/Cargo.toml b/typesense/Cargo.toml index ed271b3..111346d 100644 --- a/typesense/Cargo.toml +++ b/typesense/Cargo.toml @@ -29,6 +29,9 @@ reqwest-retry = "0.7.0" reqwest = { version = "0.12", features = ["json"] } reqwest-middleware = { version = "0.4.2", features = ["json"] } thiserror = "1.0" +bon = "3.7.0" +strum = { version = "0.26", features = ["derive"] } + [dev-dependencies] dotenvy = "0.15" diff --git a/typesense/src/builders/collection_field.rs b/typesense/src/builders/collection_field.rs new file mode 100644 index 0000000..2c92842 --- /dev/null +++ b/typesense/src/builders/collection_field.rs @@ -0,0 +1,54 @@ +//! Module with the common definitions for the +//! [fields](https://github.com/typesense/typesense/blob/v0.19.0/include/field.) +//! available in Typesense. + +use crate::traits::FieldType; +use bon::builder; +use typesense_codegen::models::{Field as TypesenseField, FieldEmbed}; + +/// Creates a new [`TypesenseField`] builder. 
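
A minimal usage sketch for this builder, based on its signature below and on the calls in the test module later in this patch; the field name and option values are illustrative only.

```rust
use typesense::builders::new_collection_field;

// The field name and its Typesense type are positional (start_fn) arguments; every
// other option gets a generated setter, and `build()` finishes the Field.
let title_field = new_collection_field("title", "string".into())
    .locale("ja")
    .sort(true)
    .optional(true)
    .build();
assert_eq!(title_field.name, "title");
```

Because bon generates setters that take the inner value of each `Option<...>` parameter, the derive code updated later in this patch can emit `.facet(#v)` instead of the previous `.facet(Some(#v))`.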
+#[builder( + // expose a public builder type named `FieldBuilder` and a public finish_fn `build()` + builder_type(name = FieldBuilder, vis = "pub"), + finish_fn(name = build, vis = "pub"), + // allow passing &str into String params + on(String, into) +)] +pub fn new_collection_field( + #[builder(start_fn)] name: String, + + #[builder(start_fn)] typesense_type: FieldType, + + optional: Option, + facet: Option, + index: Option, + locale: Option, + sort: Option, + infix: Option, + num_dim: Option, + drop: Option, + embed: Option>, + store: Option, + stem: Option, + range_index: Option, + vec_dist: Option, +) -> TypesenseField { + TypesenseField { + name, + r#type: typesense_type, + optional, + facet, + index, + locale, + sort, + infix, + num_dim, + drop, + embed, + store, + stem, + range_index, + vec_dist, + ..Default::default() + } +} diff --git a/typesense/src/builders/collection_schema.rs b/typesense/src/builders/collection_schema.rs new file mode 100644 index 0000000..b2cb0dd --- /dev/null +++ b/typesense/src/builders/collection_schema.rs @@ -0,0 +1,137 @@ +//! # Collection +//! +//! In Typesense, a group of related documents is called a collection. A collection +//! is roughly equivalent to a table in a relational database. +//! +use bon::builder; +use typesense_codegen::models::{CollectionSchema, Field, VoiceQueryModelCollectionConfig}; + +/// Creates a new [`CollectionSchema`] builder. +/// +/// In Typesense, a collection is a group of related documents, similar to a table +/// in a relational database. This builder enforces that `name` must be provided +/// before [`build`](CollectionSchemaBuilder::build) can be called. +/// +/// # Example +/// +/// ``` +/// use typesense::builders::new_collection_schema; +/// let fields = vec![]; +/// let schema = new_collection_schema("companies", fields) +/// .default_sorting_field("num_employees") +/// .build(); +/// ``` +#[builder( + builder_type(name = CollectionSchemaBuilder, vis = "pub"), + finish_fn(name = build, vis = "pub"), + state_mod(name = collection_schema_builder, vis = "pub"), + on(String, into) +)] +pub fn new_collection_schema( + /// The name of the collection. Must be unique within the Typesense instance. + #[builder(start_fn)] + name: String, + + /// The list of fields that describe the schema of documents in this collection. + #[builder(start_fn)] + fields: Vec, + + /// The name of the default sorting field for the collection. + default_sorting_field: Option, + + /// A list of token separators to use when indexing text fields. + token_separators: Option>, + + /// Whether nested fields are enabled. + enable_nested_fields: Option, + + /// Symbols that should be indexed for this collection. + symbols_to_index: Option>, + + /// Configuration for Typesense’s Voice Query Model. + voice_query_model: Option>, +) -> CollectionSchema { + CollectionSchema { + name, + fields, + default_sorting_field, + token_separators, + enable_nested_fields, + symbols_to_index, + voice_query_model, + } +} + +// custom convenience methods; the typestate module name matches `state_mod` +impl CollectionSchemaBuilder { + /// Adds a single [`Field`] to the collection schema. + /// + /// This is a convenience method for pushing one field at a time. + pub fn field(mut self, field: Field) -> Self { + self.fields.push(field); + self + } + + /// Adds multiple [`Field`] values to the collection schema. + /// + /// This is a convenience method for appending a slice of fields. 
+ pub fn fields(mut self, fields: &[Field]) -> Self + where + Field: Clone, + { + self.fields.extend_from_slice(fields); + self + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::builders::new_collection_field; + use serde_json::json; + + #[test] + fn collection_schema_serializes_as_expected() { + let fields = [ + ("company_name", "string".to_owned(), None), + ("num_employees", "int32".to_owned(), None), + ("country", "string".to_owned(), Some(true)), + ] + .map(|(name, typesense_type, facet)| { + if facet.is_some() { + new_collection_field(name, typesense_type.into()) + .facet(facet.unwrap()) + .build() + } else { + new_collection_field(name, typesense_type.into()).build() + } + }); + + let collection: CollectionSchema = + new_collection_schema("companies", fields.clone().to_vec()) + .fields(&fields) + .field(new_collection_field("size", "string".into()).build()) + .default_sorting_field("num_employees") + .build(); + + let expected = json!( + { + "name": "companies", + "fields": [ + { "name": "company_name", "type": "string" }, + { "name": "num_employees", "type": "int32" }, + { "name": "country", "type": "string", "facet": true }, + + { "name": "company_name", "type": "string" }, + { "name": "num_employees", "type": "int32" }, + { "name": "country", "type": "string", "facet": true }, + + { "name": "size", "type": "string" }, + ], + "default_sorting_field": "num_employees" + } + ); + + assert_eq!(serde_json::to_value(&collection).unwrap(), expected) + } +} diff --git a/typesense/src/builders/mod.rs b/typesense/src/builders/mod.rs new file mode 100644 index 0000000..b6a0333 --- /dev/null +++ b/typesense/src/builders/mod.rs @@ -0,0 +1,6 @@ +//! Contain convenient builders for Typesense schemas. + +mod collection_field; +mod collection_schema; +pub use collection_field::new_collection_field; +pub use collection_schema::new_collection_schema; diff --git a/typesense/src/client/multi_search.rs b/typesense/src/client/multi_search.rs index 37386db..c6c011e 100644 --- a/typesense/src/client/multi_search.rs +++ b/typesense/src/client/multi_search.rs @@ -3,7 +3,7 @@ //! A `MultiSearch` instance is created via the main `Client::multi_search()` method. use crate::{ - models::SearchResult, Client, Error, MultiSearchParseError, MultiSearchResultExt, + models::SearchResult, traits::MultiSearchResultExt, Client, Error, MultiSearchParseError, MultiSearchSearchesParameter, }; use serde::de::DeserializeOwned; diff --git a/typesense/src/collection_schema.rs b/typesense/src/collection_schema.rs deleted file mode 100644 index 0951e76..0000000 --- a/typesense/src/collection_schema.rs +++ /dev/null @@ -1,136 +0,0 @@ -//! # Collection -//! -//! In Typesense, a group of related documents is called a collection. A collection -//! is roughly equivalent to a table in a relational database. -//! -use crate::field::Field; - -pub use typesense_codegen::models::CollectionSchema; - -/// Builder for the [CollectionSchema] struct. 
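
Since this patch removes the hand-written `CollectionSchemaBuilder` (below) and `FieldBuilder` in favor of the bon-generated `new_collection_schema` / `new_collection_field` added above, a rough before/after sketch of the call shapes may help; the old calls mirror the deleted test code, the new ones the builders introduced in this patch.

```rust
use typesense::builders::{new_collection_field, new_collection_schema};

// Before (APIs deleted in this patch):
//   let country = FieldBuilder::new("country", "string".to_owned())
//       .facet(Some(true))
//       .build();
//   let schema = CollectionSchemaBuilder::new("companies", vec![country])
//       .default_sorting_field("num_employees".to_owned())
//       .build();

// After: setters take the inner value directly and &str converts via `Into<String>`.
let country = new_collection_field("country", "string".into())
    .facet(true)
    .build();
let schema = new_collection_schema("companies", vec![country])
    .default_sorting_field("num_employees")
    .build();
```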
-#[derive(Debug, Default)] -pub struct CollectionSchemaBuilder { - name: String, - fields: Vec, - default_sorting_field: Option, - token_separators: Option>, - enable_nested_fields: Option, - symbols_to_index: Option>, -} - -impl CollectionSchemaBuilder { - /// Create a builder for [CollectionSchema] - #[inline] - pub fn new(name: impl Into, fields: Vec) -> Self { - Self { - name: name.into(), - fields, - ..Default::default() - } - } - - /// Insert field - #[inline] - pub fn field(mut self, field: Field) -> Self { - self.fields.push(field); - self - } - - /// Set fields - #[inline] - pub fn fields(mut self, fields: &[Field]) -> Self { - self.fields.extend_from_slice(fields); - self - } - - /// Set default sorting field - #[inline] - pub fn default_sorting_field(mut self, default_sorting_field: impl Into) -> Self { - self.default_sorting_field = Some(default_sorting_field.into()); - self - } - - /// Set token separators - #[inline] - pub fn token_separators(mut self, token_separators: Vec) -> Self { - self.token_separators = Some(token_separators); - self - } - - /// Enable nested fields - #[inline] - pub fn enable_nested_fields(mut self, enable_nested_fields: Option) -> Self { - self.enable_nested_fields = enable_nested_fields; - self - } - - /// Set symbols to index - #[inline] - pub fn symbols_to_index(mut self, symbols_to_index: Vec) -> Self { - self.symbols_to_index = Some(symbols_to_index); - self - } - - /// Create a `CollectionSchema` with the current values of the builder, - /// It can fail if any of the required fields is not not defined. - #[inline] - pub fn build(self) -> CollectionSchema { - CollectionSchema { - name: self.name, - fields: self.fields, - default_sorting_field: self.default_sorting_field, - token_separators: self.token_separators, - enable_nested_fields: self.enable_nested_fields, - symbols_to_index: self.symbols_to_index, - ..Default::default() - } - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::field::*; - use serde_json::json; - - #[test] - fn collection_schema_serializes_as_expected() { - let fields = [ - ("company_name", "string".to_owned(), None), - ("num_employees", "int32".to_owned(), None), - ("country", "string".to_owned(), Some(true)), - ] - .map(|(name, typesense_type, facet)| { - FieldBuilder::new(name, typesense_type).facet(facet).build() - }) - .to_vec(); - - let collection = CollectionSchemaBuilder::new("companies", fields) - .default_sorting_field("num_employees".to_owned()) - .build(); - - let expected = json!( - { - "name": "companies", - "fields": [ - { - "name" : "company_name", - "type" : "string" - }, - { - "name" : "num_employees", - "type" : "int32" - }, - { - "name" : "country", - "type" : "string", - "facet" : true - } - ], - "default_sorting_field": "num_employees" - } - ); - - assert_eq!(serde_json::to_value(&collection).unwrap(), expected) - } -} diff --git a/typesense/src/error.rs b/typesense/src/error.rs index 2e5610b..68e9d4f 100644 --- a/typesense/src/error.rs +++ b/typesense/src/error.rs @@ -1,3 +1,5 @@ +//! Contains the error types for the Typesense client + use thiserror::Error; pub use typesense_codegen::apis::Error as ApiError; diff --git a/typesense/src/field/mod.rs b/typesense/src/field/mod.rs deleted file mode 100644 index 5629728..0000000 --- a/typesense/src/field/mod.rs +++ /dev/null @@ -1,147 +0,0 @@ -//! Module with the common definitions for the -//! [fields](https://github.com/typesense/typesense/blob/v0.19.0/include/field.) -//! available in Typesense. 
- -mod field_type; -pub use field_type::*; -pub use typesense_codegen::models::{Field, FieldEmbed}; - -/// Builder for the `Field` struct. -#[derive(Debug, Default)] -pub struct FieldBuilder { - name: String, - typesense_type: FieldType, - optional: Option, - facet: Option, - index: Option, - locale: Option, - sort: Option, - infix: Option, - num_dim: Option, - drop: Option, - embed: Option>, - store: Option, - stem: Option, - range_index: Option, - vec_dist: Option, -} - -impl FieldBuilder { - /// Create a Builder - #[inline] - pub fn new(name: impl Into, typesense_type: FieldType) -> Self { - Self { - name: name.into(), - typesense_type, - ..Default::default() - } - } - - /// Set if field is optional. - #[inline] - pub fn optional(mut self, optional: Option) -> Self { - self.optional = optional; - self - } - - /// Set if field is facet. - #[inline] - pub fn facet(mut self, facet: Option) -> Self { - self.facet = facet; - self - } - - /// Set if field is index. - #[inline] - pub fn index(mut self, index: Option) -> Self { - self.index = index; - self - } - - /// Set field locale. - #[inline] - pub fn locale(mut self, locale: Option) -> Self { - self.locale = locale; - self - } - - /// Set sort attribute for field - #[inline] - pub fn sort(mut self, sort: Option) -> Self { - self.sort = sort; - self - } - - /// Set infix attribute for field - #[inline] - pub fn infix(mut self, infix: Option) -> Self { - self.infix = infix; - self - } - - /// Set num_dim attribute for field - #[inline] - pub fn num_dim(mut self, num_dim: Option) -> Self { - self.num_dim = num_dim; - self - } - - /// Set drop attribute for field - #[inline] - pub fn drop(mut self, drop: Option) -> Self { - self.drop = drop; - self - } - - /// Set store attribute for field - #[inline] - pub fn store(mut self, store: Option) -> Self { - self.store = store; - self - } - - /// Set stem attribute for field - #[inline] - pub fn stem(mut self, stem: Option) -> Self { - self.stem = stem; - self - } - - /// Set range_index attribute for field - #[inline] - pub fn range_index(mut self, range_index: Option) -> Self { - self.range_index = range_index; - self - } - - /// Set vec_dist attribute for field - #[inline] - pub fn vec_dist(mut self, vec_dist: Option) -> Self { - self.vec_dist = vec_dist; - self - } - - /// Create a `Field` with the current values of the builder, - /// It can fail if the name or the typesense_type are not defined. - #[inline] - pub fn build(self) -> Field { - Field { - name: self.name, - r#type: self.typesense_type, - optional: self.optional, - facet: self.facet, - index: self.index, - locale: self.locale, - sort: self.sort, - infix: self.infix, - num_dim: self.num_dim, - drop: self.drop, - embed: self.embed, - store: self.store, - stem: self.stem, - range_index: self.range_index, - vec_dist: self.vec_dist, - ..Default::default() - } - } -} diff --git a/typesense/src/lib.rs b/typesense/src/lib.rs index 2ac59cf..90ad30b 100644 --- a/typesense/src/lib.rs +++ b/typesense/src/lib.rs @@ -44,20 +44,18 @@ //! 
``` mod client; -mod error; +mod traits; -pub mod collection_schema; -pub mod document; -pub mod field; -pub mod prelude; -// pub mod keys; +pub mod builders; +pub mod error; pub mod models; +pub mod prelude; +pub use builders::*; pub use client::{Client, MultiNodeConfiguration}; pub use error::*; pub use models::*; -pub use prelude::*; -#[cfg(feature = "typesense_derive")] +#[cfg(feature = "derive")] #[doc(hidden)] pub use typesense_derive::*; diff --git a/typesense/src/prelude.rs b/typesense/src/prelude.rs index 9c591cd..1aeb654 100644 --- a/typesense/src/prelude.rs +++ b/typesense/src/prelude.rs @@ -3,21 +3,7 @@ //! This module re-exports the most commonly used traits and types from the library, //! making them easy to import with a single `use` statement. -use serde::de::DeserializeOwned; +pub use crate::traits::{Document, FieldType, MultiSearchResultExt, ToTypesenseField}; -use crate::{MultiSearchParseError, SearchResult}; - -/// An extension trait for `typesense_codegen::models::MultiSearchResult` to provide typed parsing. -pub trait MultiSearchResultExt { - /// Parses the result at a specific index from a multi-search response into a strongly-typed `SearchResult`. - /// - /// # Arguments - /// * `index` - The zero-based index of the search result to parse. - /// - /// # Type Parameters - /// * `T` - The concrete document type to deserialize the hits into. - fn parse_at( - &self, - index: usize, - ) -> Result, MultiSearchParseError>; -} +// pub use crate::error::Error as TypesenseError; +// pub use crate::error::MultiSearchParseError; diff --git a/typesense/src/document.rs b/typesense/src/traits/document.rs similarity index 100% rename from typesense/src/document.rs rename to typesense/src/traits/document.rs diff --git a/typesense/src/field/field_type.rs b/typesense/src/traits/field_type.rs similarity index 95% rename from typesense/src/field/field_type.rs rename to typesense/src/traits/field_type.rs index da98b48..fd4dee2 100644 --- a/typesense/src/field/field_type.rs +++ b/typesense/src/traits/field_type.rs @@ -1,4 +1,4 @@ -use crate::document::Document; +use crate::traits::Document; use std::collections::{BTreeMap, HashMap}; /// Type for a field. Currently it is a wrapping to a `String` but it could be extended to a enum pub type FieldType = String; @@ -27,7 +27,7 @@ impl ToTypesenseField for Vec { #[macro_export] macro_rules! impl_to_typesense_field ( ($for:ty, $typesense_variant:expr) => { - impl $crate::field::ToTypesenseField for $for { + impl $crate::prelude::ToTypesenseField for $for { #[inline(always)] fn to_typesense_type() -> &'static str { $typesense_variant @@ -35,7 +35,7 @@ macro_rules! impl_to_typesense_field ( } }; ($for:ty, $typesense_variant:expr, $any:ident) => { - impl<$any> $crate::field::ToTypesenseField for $for { + impl<$any> $crate::prelude::ToTypesenseField for $for { #[inline(always)] fn to_typesense_type() -> &'static str { $typesense_variant diff --git a/typesense/src/traits/mod.rs b/typesense/src/traits/mod.rs new file mode 100644 index 0000000..667128e --- /dev/null +++ b/typesense/src/traits/mod.rs @@ -0,0 +1,9 @@ +//! 
Contains the core traits and extensions for Typesense client operations + +mod document; +mod field_type; +mod multi_search_ext; + +pub use document::Document; +pub use field_type::*; +pub use multi_search_ext::MultiSearchResultExt; diff --git a/typesense/src/traits/multi_search_ext.rs b/typesense/src/traits/multi_search_ext.rs new file mode 100644 index 0000000..dff04f2 --- /dev/null +++ b/typesense/src/traits/multi_search_ext.rs @@ -0,0 +1,18 @@ +use serde::de::DeserializeOwned; + +use crate::{MultiSearchParseError, SearchResult}; + +/// An extension trait for `typesense_codegen::models::MultiSearchResult` to provide typed parsing. +pub trait MultiSearchResultExt { + /// Parses the result at a specific index from a multi-search response into a strongly-typed `SearchResult`. + /// + /// # Arguments + /// * `index` - The zero-based index of the search result to parse. + /// + /// # Type Parameters + /// * `T` - The concrete document type to deserialize the hits into. + fn parse_at( + &self, + index: usize, + ) -> Result, MultiSearchParseError>; +} diff --git a/typesense/tests/client/derive_integration_test.rs b/typesense/tests/client/derive_integration_test.rs index 98516bc..7f41b0e 100644 --- a/typesense/tests/client/derive_integration_test.rs +++ b/typesense/tests/client/derive_integration_test.rs @@ -1,7 +1,8 @@ use serde::{Deserialize, Serialize}; use typesense::models::SearchParameters; +use typesense::prelude::*; +use typesense::Field; use typesense::Typesense; -use typesense::{document::Document, Field}; use crate::{get_client, new_id}; diff --git a/typesense/tests/derive/derive_collection_schema_test.rs b/typesense/tests/derive/derive_collection_schema_test.rs index fedb8fc..66dba81 100644 --- a/typesense/tests/derive/derive_collection_schema_test.rs +++ b/typesense/tests/derive/derive_collection_schema_test.rs @@ -2,12 +2,12 @@ use std::collections::BTreeMap; use serde::{Deserialize, Serialize}; use serde_json::json; -use typesense::document::Document; +use typesense::prelude::*; use typesense::Typesense; // Helper to convert schema to BTreeMap for order-independent comparison fn schema_to_map( - schema: &typesense::collection_schema::CollectionSchema, + schema: &typesense::models::CollectionSchema, ) -> BTreeMap { serde_json::from_value(serde_json::to_value(schema).unwrap()).unwrap() } diff --git a/typesense_derive/src/field_attrs.rs b/typesense_derive/src/field_attrs.rs index b6d9720..9ba96a3 100644 --- a/typesense_derive/src/field_attrs.rs +++ b/typesense_derive/src/field_attrs.rs @@ -316,7 +316,7 @@ fn get_inner_type(mut ty: &syn::Type) -> &syn::Type { } /// Processes a single struct field. -/// Returns a TokenStream which evaluates to a `Vec`. +/// Returns a TokenStream which evaluates to a `Vec`. pub fn process_field(field: &Field) -> syn::Result { let field_attrs = extract_field_attrs(&field.attrs)?; @@ -348,7 +348,7 @@ pub fn process_field(field: &Field) -> syn::Result { Ok(quote! { { - <#inner_type as typesense::document::Document>::collection_schema().fields + <#inner_type as typesense::prelude::Document>::collection_schema().fields .into_iter() .map(|mut f| { // Use the dynamically determined prefix here @@ -378,33 +378,27 @@ pub fn process_field(field: &Field) -> syn::Result { let typesense_field_type = if let Some(override_str) = &field_attrs.type_override { quote! { #override_str.to_owned() } } else { - quote! { <#ty as typesense::field::ToTypesenseField>::to_typesense_type().to_owned() } + quote! 
{ <#ty as typesense::prelude::ToTypesenseField>::to_typesense_type().to_owned() } }; let optional = field_attrs .optional .or(if is_option_type { Some(true) } else { None }) - .map(|v| quote!(.optional(Some(#v)))); - let facet = field_attrs.facet.map(|v| quote!(.facet(Some(#v)))); - let index = field_attrs.index.map(|v| quote!(.index(Some(#v)))); - let store = field_attrs.store.map(|v| quote!(.store(Some(#v)))); - let sort = field_attrs.sort.map(|v| quote!(.sort(Some(#v)))); - let infix = field_attrs.infix.map(|v| quote!(.infix(Some(#v)))); - let stem = field_attrs.stem.map(|v| quote!(.stem(Some(#v)))); - let range_index = field_attrs - .range_index - .map(|v| quote!(.range_index(Some(#v)))); - let locale = field_attrs - .locale - .map(|v| quote!(.locale(Some(#v.to_string())))); - let vec_dist = field_attrs - .vec_dist - .map(|v| quote!(.vec_dist(Some(#v.to_string())))); - let num_dim = field_attrs.num_dim.map(|v| quote!(.num_dim(Some(#v)))); + .map(|v| quote!(.optional(#v))); + let facet = field_attrs.facet.map(|v| quote!(.facet(#v))); + let index = field_attrs.index.map(|v| quote!(.index(#v))); + let store = field_attrs.store.map(|v| quote!(.store(#v))); + let sort = field_attrs.sort.map(|v| quote!(.sort(#v))); + let infix = field_attrs.infix.map(|v| quote!(.infix(#v))); + let stem = field_attrs.stem.map(|v| quote!(.stem(#v))); + let range_index = field_attrs.range_index.map(|v| quote!(.range_index(#v))); + let locale = field_attrs.locale.map(|v| quote!(.locale(#v))); + let vec_dist = field_attrs.vec_dist.map(|v| quote!(.vec_dist(#v))); + let num_dim = field_attrs.num_dim.map(|v| quote!(.num_dim(#v))); Ok(quote! { vec![ - typesense::field::FieldBuilder::new(#field_name, #typesense_field_type) + typesense::builders::new_collection_field(#field_name, #typesense_field_type) #optional #facet #index #store #sort #infix #stem #range_index #locale #vec_dist #num_dim .build() ] diff --git a/typesense_derive/src/lib.rs b/typesense_derive/src/lib.rs index 6810551..1379530 100644 --- a/typesense_derive/src/lib.rs +++ b/typesense_derive/src/lib.rs @@ -70,7 +70,7 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { let default_sorting_field = if let Some(v) = default_sorting_field { quote! { - builder = builder.default_sorting_field(#v); + let builder = builder.default_sorting_field(#v); } } else { proc_macro2::TokenStream::new() @@ -78,7 +78,7 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { let enable_nested_fields = if let Some(v) = enable_nested_fields { quote! { - builder = builder.enable_nested_fields(Some(#v)); + let builder = builder.enable_nested_fields(#v); } } else { proc_macro2::TokenStream::new() @@ -86,7 +86,7 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { let symbols_to_index = if let Some(v) = symbols_to_index { quote! { - builder = builder.symbols_to_index(vec![#(#v.to_string()),*]); + let builder = builder.symbols_to_index(vec![#(#v.to_string()),*]); } } else { proc_macro2::TokenStream::new() @@ -94,23 +94,24 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { let token_separators = if let Some(v) = token_separators { quote! { - builder = builder.token_separators(vec![#(#v.to_string()),*]); + let builder = builder.token_separators(vec![#(#v.to_string()),*]); } } else { proc_macro2::TokenStream::new() }; let gen = quote! 
{ - impl #impl_generics typesense::document::Document for #ident #ty_generics #where_clause { - fn collection_schema() -> typesense::collection_schema::CollectionSchema { + impl #impl_generics typesense::prelude::Document for #ident #ty_generics #where_clause { + fn collection_schema() -> typesense::models::CollectionSchema { let name = #collection_name.to_owned(); // Collect fields from all sources - let fields: Vec = vec![ + let fields: Vec = vec![ #(#typesense_fields,)* ].into_iter().flatten().collect(); - let mut builder = typesense::collection_schema::CollectionSchemaBuilder::new(name, fields); + // start the bon builder and set fields + let builder = typesense::builders::new_collection_schema(name, fields); #default_sorting_field #enable_nested_fields @@ -130,7 +131,7 @@ fn add_trait_bounds(mut generics: syn::Generics) -> syn::Generics { if let syn::GenericParam::Type(ref mut type_param) = *param { type_param .bounds - .push(syn::parse_quote!(typesense::field::ToTypesenseField)); + .push(syn::parse_quote!(typesense::prelude::ToTypesenseField)); } } generics From 0b3dde02f72123aecdd5f514dc397267a9ade482 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Wed, 20 Aug 2025 08:29:32 +0700 Subject: [PATCH 12/21] feat: wasm support & ergonomic client builder --- Cargo.toml | 1 - typesense/Cargo.toml | 22 +- typesense/src/client/collection/document.rs | 13 +- typesense/src/client/mod.rs | 489 +++++++++++------- typesense/src/client/multi_search.rs | 16 +- typesense/src/lib.rs | 140 +++-- typesense/src/models/api_key_actions.rs | 292 +++++++++++ typesense/tests/api/collection.rs | 272 +++++----- typesense/tests/api/documents.rs | 222 ++++---- typesense/tests/client/aliases_test.rs | 27 +- typesense/tests/client/analytics_test.rs | 27 +- typesense/tests/client/client_test.rs | 62 ++- typesense/tests/client/collections_test.rs | 27 +- .../tests/client/conversation_models_test.rs | 22 +- .../tests/client/derive_integration_test.rs | 41 +- typesense/tests/client/documents_test.rs | 40 +- typesense/tests/client/keys_test.rs | 27 +- typesense/tests/client/mod.rs | 21 +- typesense/tests/client/multi_search_test.rs | 75 ++- typesense/tests/client/presets_test.rs | 27 +- .../tests/client/search_overrides_test.rs | 27 +- .../client/stemming_dictionaries_test.rs | 27 +- typesense/tests/client/stopwords_test.rs | 27 +- typesense/tests/client/synonyms_test.rs | 27 +- typesense_codegen/.openapi-generator-ignore | 4 +- typesense_codegen/Cargo.toml | 7 +- typesense_codegen/src/apis/configuration.rs | 17 +- typesense_codegen/src/apis/mod.rs | 22 +- 28 files changed, 1430 insertions(+), 591 deletions(-) create mode 100644 typesense/src/models/api_key_actions.rs diff --git a/Cargo.toml b/Cargo.toml index 322f339..a620901 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,3 @@ - [workspace] members = [ "typesense", diff --git a/typesense/Cargo.toml b/typesense/Cargo.toml index 111346d..edc37fe 100644 --- a/typesense/Cargo.toml +++ b/typesense/Cargo.toml @@ -25,28 +25,38 @@ serde_json = "1.0" sha2 = "0.10" typesense_derive = { version = "0.1.0", path = "../typesense_derive", optional = true } typesense_codegen = { version = "0.25.0", path = "../typesense_codegen" } -reqwest-retry = "0.7.0" -reqwest = { version = "0.12", features = ["json"] } -reqwest-middleware = { version = "0.4.2", features = ["json"] } thiserror = "1.0" bon = "3.7.0" -strum = { version = "0.26", features = ["derive"] } +reqwest-retry = "0.7.0" +web-time = "=1.1.0" +# strum = { version = "0.26", features = ["derive"] } +# ---------- 
native-only dependencies (non-wasm) ---------- +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +reqwest = { version = "0.12", features = ["json"] } +reqwest-middleware = { version = "0.4.2"} + +# ---------- wasm deps ---------- +[target.'cfg(target_arch = "wasm32")'.dependencies] +# IMPORTANT: disable default features to avoid hyper/tokio/mio +reqwest = { version = "0.12", default-features = false, features = ["json"] } [dev-dependencies] dotenvy = "0.15" trybuild = "1.0.42" -tokio = { version = "1", features = ["macros", "rt-multi-thread"] } -wiremock = "0.5" nanoid = "0.4" +# native-only dev deps [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] tokio = { version = "1.5", features = ["macros", "rt", "rt-multi-thread"] } +wiremock = "0.5" +# wasm test deps [target.'cfg(target_arch = "wasm32")'.dev-dependencies] console_error_panic_hook = "0.1.6" wasm-bindgen = "0.2" wasm-bindgen-test = "0.3.23" +wasm-bindgen-futures = "0.4.50" [[test]] name = "derive_tests" diff --git a/typesense/src/client/collection/document.rs b/typesense/src/client/collection/document.rs index 0f2e48d..f5f682a 100644 --- a/typesense/src/client/collection/document.rs +++ b/typesense/src/client/collection/document.rs @@ -79,18 +79,17 @@ where /// # Example /// ```no_run /// # use serde::{Serialize, Deserialize}; - /// # use typesense::{Client, MultiNodeConfiguration, models}; + /// # use typesense::{Client, models}; /// # use reqwest::Url; /// # #[derive(Serialize, Deserialize)] /// # struct Book { id: String, title: String, pages: i32 } /// # /// # async fn run() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let book_update = serde_json::json!({ "pages": 654 }); /// /// // Simple update diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index afc91f7..ce7bba2 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -13,24 +13,27 @@ //! //! ## Example Usage //! +//! The following example demonstrates how to use the client in a standard +//! server-side **Tokio** environment. +//! //! ```no_run -//! use typesense::{Client, MultiNodeConfiguration, models}; +//! #[cfg(not(target_family = "wasm"))] +//! { +//! use typesense::{Client, models}; //! use reqwest::Url; //! use reqwest_retry::policies::ExponentialBackoff; //! use std::time::Duration; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { -//! let config = MultiNodeConfiguration { -//! nodes: vec![Url::parse("http://localhost:8108")?], -//! nearest_node: None, -//! api_key: "your-api-key".to_string(), -//! healthcheck_interval: Duration::from_secs(60), -//! retry_policy: ExponentialBackoff::builder().build_with_max_retries(3), -//! connection_timeout: Duration::from_secs(10), -//! }; -//! -//! let client = Client::new(config)?; +//! let client = Client::builder() +//! .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) +//! .api_key("xyz") +//! .healthcheck_interval(Duration::from_secs(60)) +//! .retry_policy(ExponentialBackoff::builder().build_with_max_retries(3)) +//! .connection_timeout(Duration::from_secs(5)) +//! .build() +//! .unwrap(); //! //! // Retrieve details for a collection //! 
let collection = client.collection("products").retrieve().await?; @@ -42,13 +45,73 @@ //! query_by: Some("name".to_string()), //! ..Default::default() //! }; -//! let search_results = client.collection("products").documents().search(search_params).await?; -//! println!("Found {} hits.", search_results.found.unwrap_or(0)); //! +//! let search_results = client +//! .collection("products") +//! .documents() +//! .search(search_params) +//! .await?; +//! +//! println!("Found {} hits.", search_results.found.unwrap_or(0)); //! Ok(()) //! } +//! } +//! ``` +//! --- +//! +//! ### WebAssembly (Wasm) Usage +//! +//! When compiling for a WebAssembly target (`wasm32-unknown-unknown`), the +//! client's underlying HTTP transport and runtime are different. +//! +//! - `reqwest` internally uses the browser's **fetch API**. +//! - Tokio-based features such as middleware, retries, and connection +//! timeouts are **not available**. +//! +//! Example: +//! +//! ```no_run +//! #[cfg(target_family = "wasm")] +//! { +//! use typesense::{Client, models}; +//! use reqwest::Url; +//! use std::time::Duration; +//! use wasm_bindgen_futures::spawn_local; +//! +//! fn main() { +//! spawn_local(async { +//! let client = Client::builder() +//! .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) +//! .api_key("xyz") +//! .healthcheck_interval(Duration::from_secs(60)) +//! // .retry_policy(...) <-- not supported in Wasm +//! // .connection_timeout(...) <-- not supported in Wasm +//! .build() +//! .unwrap(); +//! +//! // Retrieve details for a collection +//! match client.collection("products").retrieve().await { +//! Ok(collection) => println!("Collection Name: {}", collection.name), +//! Err(e) => eprintln!("Error retrieving collection: {}", e), +//! } +//! +//! // Search for a document +//! let search_params = models::SearchParameters { +//! q: Some("phone".to_string()), +//! query_by: Some("name".to_string()), +//! ..Default::default() +//! }; +//! +//! match client.collection("products").documents().search(search_params).await { +//! Ok(search_results) => { +//! println!("Found {} hits.", search_results.found.unwrap_or(0)); +//! } +//! Err(e) => eprintln!("Error searching documents: {}", e), +//! } +//! }); +//! } +//! } //! ``` - mod alias; mod aliases; mod analytics; @@ -84,15 +147,19 @@ use stopwords::Stopwords; use crate::Error; use reqwest::Url; -use reqwest_middleware::ClientBuilder; -use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; +#[cfg(not(target_arch = "wasm32"))] +use reqwest_middleware::ClientBuilder as ReqwestMiddlewareClientBuilder; +use reqwest_retry::policies::ExponentialBackoff; +#[cfg(not(target_arch = "wasm32"))] +use reqwest_retry::RetryTransientMiddleware; + use std::future::Future; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, }; -use std::time::{Duration, Instant}; use typesense_codegen::apis::{self, configuration}; +use web_time::{Duration, Instant}; use crate::client::multi_search::MultiSearch; @@ -105,42 +172,26 @@ struct Node { last_access_timestamp: Instant, } -/// Configuration for the multi-node Typesense client. -#[derive(Clone, Debug)] -pub struct MultiNodeConfiguration { - /// A list of all nodes in the Typesense cluster. - pub nodes: Vec, - /// An optional, preferred node to try first for every request. Ideal for reducing latency. - pub nearest_node: Option, - /// The Typesense API key used for authentication. - pub api_key: String, - /// The duration after which an unhealthy node will be retried for requests. 
- pub healthcheck_interval: Duration, - /// The retry policy for transient network errors on a *single* node. - pub retry_policy: ExponentialBackoff, - /// The timeout for each individual network request. - pub connection_timeout: Duration, -} -impl Default for MultiNodeConfiguration { - /// Provides a default configuration suitable for local development. - /// - /// - **nodes**: Empty. - /// - **nearest_node**: None. - /// - **api_key**: "xyz" (a common placeholder). - /// - **healthcheck_interval**: 60 seconds. - /// - **retry_policy**: Exponential backoff with a maximum of 3 retries. - /// - **connection_timeout**: 5 seconds. - fn default() -> Self { - Self { - nodes: vec![], - nearest_node: None, - api_key: "xyz".to_string(), - healthcheck_interval: Duration::from_secs(60), - retry_policy: ExponentialBackoff::builder().build_with_max_retries(3), - connection_timeout: Duration::from_secs(5), - } - } -} +// impl Default for MultiNodeConfiguration { +// /// Provides a default configuration suitable for local development. +// /// +// /// - **nodes**: Empty. +// /// - **nearest_node**: None. +// /// - **api_key**: "xyz" (a common placeholder). +// /// - **healthcheck_interval**: 60 seconds. +// /// - **retry_policy**: Exponential backoff with a maximum of 3 retries. +// /// - **connection_timeout**: 5 seconds. +// fn default() -> Self { +// Self { +// nodes: vec![], +// nearest_node: None, +// api_key: "xyz".to_string(), +// healthcheck_interval: Duration::from_secs(60), +// retry_policy: ExponentialBackoff::builder().build_with_max_retries(3), +// connection_timeout: Duration::from_secs(5), +// } +// } +// } /// The main entry point for all interactions with the Typesense API. /// @@ -153,22 +204,43 @@ pub struct Client { nearest_node: Option>>, api_key: String, healthcheck_interval: Duration, + current_node_index: AtomicUsize, + + #[cfg(not(target_arch = "wasm32"))] retry_policy: ExponentialBackoff, + #[cfg(not(target_arch = "wasm32"))] connection_timeout: Duration, - current_node_index: AtomicUsize, } +#[bon::bon] impl Client { /// Creates a new `Client` with the given configuration. /// /// Returns an error if the configuration contains no nodes. - pub fn new(config: MultiNodeConfiguration) -> Result { - if config.nodes.is_empty() && config.nearest_node.is_none() { + #[builder] + pub fn new( + /// The Typesense API key used for authentication. + api_key: impl Into, + /// A list of all nodes in the Typesense cluster. + nodes: Vec, + /// An optional, preferred node to try first for every request. Ideal for reducing latency. + #[builder(into)] + nearest_node: Option, + #[builder(default = Duration::from_secs(60))] + /// The duration after which an unhealthy node will be retried for requests. + healthcheck_interval: Duration, + #[builder(default = ExponentialBackoff::builder().build_with_max_retries(3))] + /// The retry policy for transient network errors on a *single* node. + retry_policy: ExponentialBackoff, + #[builder(default = Duration::from_secs(10))] + /// The timeout for each individual network request. 
+ connection_timeout: Duration, + ) -> Result { + if nodes.is_empty() && nearest_node.is_none() { return Err("Configuration must include at least one node or a nearest_node."); } - let nodes = config - .nodes + let node_list = nodes .into_iter() .map(|url| { Arc::new(Mutex::new(Node { @@ -179,7 +251,7 @@ impl Client { }) .collect(); - let nearest_node = config.nearest_node.map(|url| { + let nearest_node_arc = nearest_node.map(|url| { Arc::new(Mutex::new(Node { url, is_healthy: true, @@ -188,13 +260,16 @@ impl Client { }); Ok(Self { - nodes, - nearest_node, - api_key: config.api_key, - healthcheck_interval: config.healthcheck_interval, - retry_policy: config.retry_policy, - connection_timeout: config.connection_timeout, + nodes: node_list, + nearest_node: nearest_node_arc, + api_key: api_key.into(), + healthcheck_interval, current_node_index: AtomicUsize::new(0), + + #[cfg(not(target_arch = "wasm32"))] + retry_policy, + #[cfg(not(target_arch = "wasm32"))] + connection_timeout, }) } @@ -265,8 +340,15 @@ impl Client { node.url.clone() }; + #[cfg(target_arch = "wasm32")] + let http_client = reqwest::Client::builder() + // .timeout() is not available on wasm32 + .build() + .expect("Failed to build reqwest client"); + // This client handles transient retries (e.g. network blips) on the *current node*. - let http_client = ClientBuilder::new( + #[cfg(not(target_arch = "wasm32"))] + let http_client = ReqwestMiddlewareClientBuilder::new( reqwest::Client::builder() .timeout(self.connection_timeout) .build() @@ -320,20 +402,22 @@ impl Client { /// /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let all_aliases = client.aliases().retrieve().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn aliases(&self) -> Aliases<'_> { Aliases::new(self) @@ -342,20 +426,22 @@ impl Client { /// Provides access to a specific collection alias's-related API endpoints. /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let specific_alias = client.alias("books_alias").retrieve().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn alias<'a>(&'a self, name: &'a str) -> Alias<'a> { Alias::new(self, name) @@ -364,20 +450,22 @@ impl Client { /// Provides access to API endpoints for managing collections like `create()` and `retrieve()`. 
/// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let all_collections = client.collections().retrieve().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn collections(&self) -> Collections<'_> { Collections::new(self) @@ -402,19 +490,20 @@ impl Client { /// When you want to retrieve or search for documents and have them automatically /// deserialized into your own structs. /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use serde::{Serialize, Deserialize}; /// # use reqwest::Url; /// # /// # #[derive(Serialize, Deserialize, Debug)] /// # struct Book { id: String, title: String } /// # async fn run() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// // Get a typed handle to the "books" collection /// let books_collection = client.collection_of::("books"); /// @@ -424,6 +513,7 @@ impl Client { /// # /// # Ok(()) /// # } + /// # } /// ``` pub fn collection_of<'a, T>(&'a self, collection_name: &'a str) -> Collection<'a, T> where @@ -445,19 +535,21 @@ impl Client { /// /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # async fn run() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let products_collection = client.collection("products"); /// # /// # Ok(()) /// # } + /// # } /// ``` pub fn collection<'a>(&'a self, collection_name: &'a str) -> Collection<'a, serde_json::Value> { Collection::new(self, collection_name) @@ -466,20 +558,22 @@ impl Client { /// Provides access to the analytics-related API endpoints. 
/// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let analytics_rules = client.analytics().rules().retrieve().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn analytics(&self) -> Analytics<'_> { Analytics::new(self) @@ -488,42 +582,46 @@ impl Client { /// Returns a `Conversations` instance for managing conversation models. /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let conversation = client.conversations().models().retrieve().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` - pub fn conversations(&self) -> Conversations { + pub fn conversations(&self) -> Conversations<'_> { Conversations::new(self) } /// Provides access to top-level, non-namespaced API endpoints like `health` and `debug`. 
/// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let health = client.operations().health().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn operations(&self) -> Operations<'_> { Operations::new(self) @@ -533,17 +631,18 @@ impl Client { /// /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration, models}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::{Client, models}; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// # let schema = models::ApiKeySchema { /// # description: "Search-only key.".to_string(), /// # actions: vec!["documents:search".to_string()], @@ -553,6 +652,7 @@ impl Client { /// let new_key = client.keys().create(schema).await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn keys(&self) -> Keys<'_> { Keys::new(self) @@ -565,20 +665,22 @@ impl Client { /// /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let deleted_key = client.key(123).delete().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn key(&self, key_id: i64) -> Key<'_> { Key::new(self, key_id) @@ -588,22 +690,24 @@ impl Client { /// /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let list_of_presets = client.presets().retrieve().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` - pub fn presets(&self) 
-> Presets { + pub fn presets(&self) -> Presets<'_> { Presets::new(self) } @@ -614,20 +718,22 @@ impl Client { /// /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::Client; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let preset = client.preset("my-preset").retrieve().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn preset<'a>(&'a self, preset_id: &'a str) -> Preset<'a> { Preset::new(self, preset_id) @@ -637,25 +743,26 @@ impl Client { /// /// # Example /// - /// ``` - /// # use typesense::{Client, MultiNodeConfiguration, models}; + /// ```no_run + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::{Client, models}; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let response = client.stemming().dictionaries().retrieve().await.unwrap(); /// # println!("{:#?}", response); /// # Ok(()) /// # } + /// # } /// ``` - pub fn stemming(&self) -> Stemming { + pub fn stemming(&self) -> Stemming<'_> { Stemming::new(self) } @@ -663,20 +770,22 @@ impl Client { /// /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration, models}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::{Client, models}; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// let all_stopwords = client.stopwords().retrieve().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn stopwords(&self) -> Stopwords<'_> { Stopwords::new(self) @@ -689,20 +798,22 @@ impl Client { /// /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration, models}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::{Client, models}; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # 
.unwrap(); /// let my_stopword_set = client.stopword("common_words").retrieve().await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn stopword<'a>(&'a self, set_id: &'a str) -> Stopword<'a> { Stopword::new(self, set_id) @@ -712,17 +823,18 @@ impl Client { /// /// # Example /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration, models}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::{Client, models}; /// # use reqwest::Url; /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// # let search_requests = models::MultiSearchSearchesParameter { /// # searches: vec![models::MultiSearchCollectionParameters { /// # collection: Some("products".to_string()), @@ -736,6 +848,7 @@ impl Client { /// let results = client.multi_search().perform(&search_requests, &common_params).await.unwrap(); /// # Ok(()) /// # } + /// # } /// ``` pub fn multi_search(&self) -> MultiSearch<'_> { MultiSearch::new(self) @@ -751,9 +864,15 @@ where match error { // Server-side errors (5xx) indicate a problem with the node, so we should try another. apis::Error::ResponseError(content) => content.status.is_server_error(), - // Underlying reqwest errors (e.g. connection refused) are retriable. + + // Underlying reqwest errors (e.g., connection refused) are retriable on both native and wasm. + apis::Error::Reqwest(_) => true, + // Network-level errors from middleware are always retriable. - apis::Error::Reqwest(_) | apis::Error::ReqwestMiddleware(_) => true, + // This match arm is ONLY included when compiling for non-wasm targets. + #[cfg(not(target_arch = "wasm32"))] + apis::Error::ReqwestMiddleware(_) => true, + // Client-side (4xx) or parsing errors are not retriable as the request is likely invalid. _ => false, } diff --git a/typesense/src/client/multi_search.rs b/typesense/src/client/multi_search.rs index c6c011e..5072839 100644 --- a/typesense/src/client/multi_search.rs +++ b/typesense/src/client/multi_search.rs @@ -92,7 +92,9 @@ impl<'a> MultiSearch<'a> { /// This example demonstrates a federated search across two different collections. /// /// ```no_run - /// # use typesense::{Client, MultiNodeConfiguration, SearchResult, models, prelude::*}; + /// # #[cfg(not(target_family = "wasm"))] + /// # { + /// # use typesense::{Client, SearchResult, models, prelude::*}; /// # use reqwest::Url; /// # use serde::Deserialize; /// # @@ -104,12 +106,11 @@ impl<'a> MultiSearch<'a> { /// # /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// # let config = MultiNodeConfiguration { - /// # nodes: vec![Url::parse("http://localhost:8108")?], - /// # api_key: "xyz".to_string(), - /// # ..Default::default() - /// # }; - /// # let client = Client::new(config)?; + /// # let client = Client::builder() + /// # .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + /// # .api_key("xyz") + /// # .build() + /// # .unwrap(); /// // Define the individual search queries for different collections. 
/// let search_requests = models::MultiSearchSearchesParameter { /// searches: vec![ @@ -151,6 +152,7 @@ impl<'a> MultiSearch<'a> { /// println!("Found {} brands.", typed_brands.found.unwrap_or(0)); /// # Ok(()) /// # } + /// # } /// ``` /// # Arguments /// * `search_requests` - A reference to a `MultiSearchSearchesParameter` containing the list of individual search queries. The `union` field is ignored. diff --git a/typesense/src/lib.rs b/typesense/src/lib.rs index 90ad30b..afa3e62 100644 --- a/typesense/src/lib.rs +++ b/typesense/src/lib.rs @@ -3,43 +3,119 @@ //! # Typesense //! -//! Rust client library for Typesense +//! A Rust client library for the Typesense API. +//! +//! This library provides a modern, ergonomic, and async-native interface for +//! interacting with Typesense, with special attention to multi-node clusters and +//! platform-specific environments like WebAssembly. //! //! # Examples //! -//! ``` -//! #[cfg(any(feature = "tokio_test", target_arch = "wasm32"))] +//! The following examples demonstrate how to define a collection schema using +//! the Typesense derive macro and create it on the server. +//! +//! --- +//! +//! ### Native (Tokio) +//! +//! This example shows the typical setup for a server-side application using the +//! Tokio runtime. It includes features like connection timeouts and automatic +//! request retries. +//! +//! ```no_run +//! #[cfg(not(target_family = "wasm"))] //! { -//! use serde::{Deserialize, Serialize}; -//! use typesense::document::Document; -//! use typesense::Typesense; -//! use typesense::apis::collections_api; -//! use typesense::apis::configuration::{ApiKey, Configuration}; -//! -//! #[derive(Typesense, Serialize, Deserialize)] -//! #[typesense(collection_name = "companies", default_sorting_field = "num_employees")] -//! struct Company { -//! company_name: String, -//! num_employees: i32, -//! #[typesense(facet)] -//! country: String, -//! } +//! use serde::{Deserialize, Serialize}; +//! use typesense::{Client, Typesense, prelude::*}; +//! use reqwest::Url; +//! use reqwest_retry::policies::ExponentialBackoff; +//! use std::time::Duration; +//! +//! /// A struct representing a company document. +//! #[derive(Typesense, Serialize, Deserialize, Debug)] +//! #[typesense( +//! collection_name = "companies", +//! default_sorting_field = "num_employees" +//! )] +//! struct Company { +//! company_name: String, +//! num_employees: i32, +//! #[typesense(facet)] +//! country: String, +//! } +//! +//! #[tokio::main] +//! async fn main() -> Result<(), Box> { +//! let client = Client::builder() +//! .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) +//! .api_key("xyz") +//! .healthcheck_interval(Duration::from_secs(60)) +//! .retry_policy(ExponentialBackoff::builder().build_with_max_retries(3)) +//! .connection_timeout(Duration::from_secs(5)) +//! .build()?; +//! +//! // Create the collection in Typesense +//! let collection = client +//! .collections() +//! .create(Company::collection_schema()) +//! .await?; //! -//! #[tokio::main] -//! async fn main() { -//! let config = Configuration { -//! base_path: "http://localhost:5000".to_owned(), -//! api_key: Some(ApiKey { -//! prefix: None, -//! key: "VerySecretKey".to_owned(), -//! }), -//! ..Default::default() -//! }; -//! -//! let collection = collections_api::create_collection(&config, Company::collection_schema()) -//! .await -//! .unwrap(); +//! println!("Created collection: {:?}", collection); +//! Ok(()) +//! } //! } +//! ``` +//! +//! --- +//! +//! ### WebAssembly (Wasm) +//! +//! 
This example is tailored for a WebAssembly target.
+//! Key differences:
+//! - The `main` function is synchronous and uses `spawn_local` to run async code.
+//! - Tokio-dependent features like `.retry_policy()` and `.connection_timeout()`
+//!   are disabled. You can still set them in the client builder, but they will have no effect.
+//!
+//! ```no_run
+//! #[cfg(target_family = "wasm")]
+//! {
+//! use serde::{Deserialize, Serialize};
+//! use typesense::{Client, Typesense, prelude::*};
+//! use reqwest::Url;
+//! use std::time::Duration;
+//! use wasm_bindgen_futures::spawn_local;
+//!
+//! /// A struct representing a company document.
+//! #[derive(Typesense, Serialize, Deserialize, Debug)]
+//! #[typesense(
+//!     collection_name = "companies",
+//!     default_sorting_field = "num_employees"
+//! )]
+//! struct Company {
+//!     company_name: String,
+//!     num_employees: i32,
+//!     #[typesense(facet)]
+//!     country: String,
+//! }
+//!
+//! fn main() {
+//!     spawn_local(async {
+//!         let client = Client::builder()
+//!             .nodes(vec![Url::parse("http://localhost:8108").unwrap()])
+//!             .api_key("xyz")
+//!             .healthcheck_interval(Duration::from_secs(60))
+//!             // .retry_policy(...) <-- disabled in Wasm
+//!             // .connection_timeout(...) <-- disabled in Wasm
+//!             .build()
+//!             .unwrap();
+//!
+//!         // Create the collection in Typesense
+//!         match client.collections().create(Company::collection_schema()).await {
+//!             Ok(collection) => println!("Created collection: {:?}", collection),
+//!             Err(e) => eprintln!("Error creating collection: {}", e),
+//!         }
+//!     });
+//! }
+//! }
+//! ```
@@ -52,7 +128,7 @@
 pub mod models;
 pub mod prelude;
 
 pub use builders::*;
-pub use client::{Client, MultiNodeConfiguration};
+pub use client::Client;
 pub use error::*;
 pub use models::*;
diff --git a/typesense/src/models/api_key_actions.rs b/typesense/src/models/api_key_actions.rs
new file mode 100644
index 0000000..721246c
--- /dev/null
+++ b/typesense/src/models/api_key_actions.rs
@@ -0,0 +1,292 @@
+use serde::Serialize;
+use strum::{Display, EnumString};
+
+/// Defines a single, specific action that can be granted to a Typesense API Key.
+///
+/// This enum provides compile-time safety and IDE autocompletion for all known
+/// Typesense actions. It is marked as `#[non_exhaustive]` so that if Typesense
+/// adds new actions in the future, it will not be a breaking change for users of this library.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Display, EnumString)]
+#[non_exhaustive]
+pub enum ApiKeyAction {
+    // --- Collection Actions ---
+    /// Allows a collection to be created. (`collections:create`)
+    #[serde(rename = "collections:create")]
+    #[strum(serialize = "collections:create")]
+    CollectionsCreate,
+
+    /// Allows a collection to be deleted. (`collections:delete`)
+    #[serde(rename = "collections:delete")]
+    #[strum(serialize = "collections:delete")]
+    CollectionsDelete,
+
+    /// Allows a collection schema to be retrieved. (`collections:get`)
+    #[serde(rename = "collections:get")]
+    #[strum(serialize = "collections:get")]
+    CollectionsGet,
+
+    /// Allows retrieving all collection schemas. (`collections:list`)
+    #[serde(rename = "collections:list")]
+    #[strum(serialize = "collections:list")]
+    CollectionsList,
+
+    /// Allows all collection-related operations. (`collections:*`)
+    #[serde(rename = "collections:*")]
+    #[strum(serialize = "collections:*")]
+    CollectionsAll,
+
+    // --- Document Actions ---
+    /// Allows only search requests.
(`documents:search`) + #[serde(rename = "documents:search")] + #[strum(serialize = "documents:search")] + DocumentsSearch, + + /// Allows fetching a single document. (`documents:get`) + #[serde(rename = "documents:get")] + #[strum(serialize = "documents:get")] + DocumentsGet, + + /// Allows creating documents. (`documents:create`) + #[serde(rename = "documents:create")] + #[strum(serialize = "documents:create")] + DocumentsCreate, + + /// Allows upserting documents. (`documents:upsert`) + #[serde(rename = "documents:upsert")] + #[strum(serialize = "documents:upsert")] + DocumentsUpsert, + + /// Allows updating documents. (`documents:update`) + #[serde(rename = "documents:update")] + #[strum(serialize = "documents:update")] + DocumentsUpdate, + + /// Allows deletion of documents. (`documents:delete`) + #[serde(rename = "documents:delete")] + #[strum(serialize = "documents:delete")] + DocumentsDelete, + + /// Allows import of documents in bulk. (`documents:import`) + #[serde(rename = "documents:import")] + #[strum(serialize = "documents:import")] + DocumentsImport, + + /// Allows export of documents in bulk. (`documents:export`) + #[serde(rename = "documents:export")] + #[strum(serialize = "documents:export")] + DocumentsExport, + + /// Allows all document operations. (`documents:*`) + #[serde(rename = "documents:*")] + #[strum(serialize = "documents:*")] + DocumentsAll, + + // --- Alias Actions --- + /// Allows all aliases to be fetched. (`aliases:list`) + #[serde(rename = "aliases:list")] + #[strum(serialize = "aliases:list")] + AliasesList, + + /// Allows a single alias to be retrieved (`aliases:get`) + #[serde(rename = "aliases:get")] + #[strum(serialize = "aliases:get")] + AliasesGet, + + /// Allows the creation of aliases. (`aliases:create`) + #[serde(rename = "aliases:create")] + #[strum(serialize = "aliases:create")] + AliasesCreate, + + /// Allows the deletion of aliases. (`aliases:delete`) + #[serde(rename = "aliases:delete")] + #[strum(serialize = "aliases:delete")] + AliasesDelete, + + /// Allows all alias operations. (`aliases:*`) + #[serde(rename = "aliases:*")] + #[strum(serialize = "aliases:*")] + AliasesAll, + + // --- Synonym Actions --- + /// Allows all synonyms to be fetched. (`synonyms:list`) + #[serde(rename = "synonyms:list")] + #[strum(serialize = "synonyms:list")] + SynonymsList, + + /// Allows a single synonym to be retrieved (`synonyms:get`) + #[serde(rename = "synonyms:get")] + #[strum(serialize = "synonyms:get")] + SynonymsGet, + + /// Allows the creation of synonyms. (`synonyms:create`) + #[serde(rename = "synonyms:create")] + #[strum(serialize = "synonyms:create")] + SynonymsCreate, + + /// Allows the deletion of synonyms. (`synonyms:delete`) + #[serde(rename = "synonyms:delete")] + #[strum(serialize = "synonyms:delete")] + SynonymsDelete, + + /// Allows all synonym operations. (`synonyms:*`) + #[serde(rename = "synonyms:*")] + #[strum(serialize = "synonyms:*")] + SynonymsAll, + + // --- Override Actions --- + /// Allows all overrides to be fetched. (`overrides:list`) + #[serde(rename = "overrides:list")] + #[strum(serialize = "overrides:list")] + OverridesList, + + /// Allows a single override to be retrieved (`overrides:get`) + #[serde(rename = "overrides:get")] + #[strum(serialize = "overrides:get")] + OverridesGet, + + /// Allows the creation of overrides. (`overrides:create`) + #[serde(rename = "overrides:create")] + #[strum(serialize = "overrides:create")] + OverridesCreate, + + /// Allows the deletion of overrides. 
(`overrides:delete`) + #[serde(rename = "overrides:delete")] + #[strum(serialize = "overrides:delete")] + OverridesDelete, + + /// Allows all override operations. (`overrides:*`) + #[serde(rename = "overrides:*")] + #[strum(serialize = "overrides:*")] + OverridesAll, + + // --- Stopwords Actions --- + /// Allows all stopword sets to be fetched. (`stopwords:list`) + #[serde(rename = "stopwords:list")] + #[strum(serialize = "stopwords:list")] + StopwordsList, + + /// Allows a single stopword set to be retrieved. (`stopwords:get`) + #[serde(rename = "stopwords:get")] + #[strum(serialize = "stopwords:get")] + StopwordsGet, + + /// Allows the creation of a stopword set. (`stopwords:create`) + #[serde(rename = "stopwords:create")] + #[strum(serialize = "stopwords:create")] + StopwordsCreate, + + /// Allows the deletion of a stopword set. (`stopwords:delete`) + #[serde(rename = "stopwords:delete")] + #[strum(serialize = "stopwords:delete")] + StopwordsDelete, + + /// Allows all stopwords operations. (`stopwords:*`) + #[serde(rename = "stopwords:*")] + #[strum(serialize = "stopwords:*")] + StopwordsAll, + + // --- Keys Actions --- + /// Allows fetching of metadata for all keys (`keys:list`) + #[serde(rename = "keys:list")] + #[strum(serialize = "keys:list")] + KeysList, + + /// Allows metadata for a single key to be fetched (`keys:get`) + #[serde(rename = "keys:get")] + #[strum(serialize = "keys:get")] + KeysGet, + + /// Allows the creation of API keys. (`keys:create`) + #[serde(rename = "keys:create")] + #[strum(serialize = "keys:create")] + KeysCreate, + + /// Allows the deletion of API keys. (`keys:delete`) + #[serde(rename = "keys:delete")] + #[strum(serialize = "keys:delete")] + KeysDelete, + + /// Allows all API Key related operations. (`keys:*`) + #[serde(rename = "keys:*")] + #[strum(serialize = "keys:*")] + KeysAll, + + // --- Analytics Actions --- + /// Allows all analytics rules and events to be fetched. (`analytics:list`) + #[serde(rename = "analytics:list")] + #[strum(serialize = "analytics:list")] + AnalyticsList, + + /// Allows for a single analytics rule or event to be fetched. (`analytics:get`) + #[serde(rename = "analytics:get")] + #[strum(serialize = "analytics:get")] + AnalyticsGet, + + /// Allows the creation of analytics rules and events. (`analytics:create`) + #[serde(rename = "analytics:create")] + #[strum(serialize = "analytics:create")] + AnalyticsCreate, + + /// Allows the deletion of analytics rules and events. (`analytics:delete`) + #[serde(rename = "analytics:delete")] + #[strum(serialize = "analytics:delete")] + AnalyticsDelete, + + /// Allows all analytics rules and events related operations. (`analytics:*`) + #[serde(rename = "analytics:*")] + #[strum(serialize = "analytics:*")] + AnalyticsAll, + + // --- Analytics Rules Actions --- + /// Allows all analytics rules to be fetched. (`analytics/rules:list`) + #[serde(rename = "analytics/rules:list")] + #[strum(serialize = "analytics/rules:list")] + AnalyticsRulesList, + + /// Allows for a single analytics rule to be fetched. (`analytics/rules:get`) + #[serde(rename = "analytics/rules:get")] + #[strum(serialize = "analytics/rules:get")] + AnalyticsRulesGet, + + /// Allows the creation of analytics rules. (`analytics/rules:create`) + #[serde(rename = "analytics/rules:create")] + #[strum(serialize = "analytics/rules:create")] + AnalyticsRulesCreate, + + /// Allows the deletion of analytics rules. 
(`analytics/rules:delete`) + #[serde(rename = "analytics/rules:delete")] + #[strum(serialize = "analytics/rules:delete")] + AnalyticsRulesDelete, + + /// Allows all analytics rules related operations. (`analytics/rules:*`) + #[serde(rename = "analytics/rules:*")] + #[strum(serialize = "analytics/rules:*")] + AnalyticsRulesAll, + + // --- Analytics Events Actions --- + /// Allows the creation of analytics events. (`analytics/events:create`) + #[serde(rename = "analytics/events:create")] + #[strum(serialize = "analytics/events:create")] + AnalyticsEventsCreate, + + // --- Misc Actions --- + /// Allows access to the metrics endpoint. (`metrics.json:list`) + #[serde(rename = "metrics.json:list")] + #[strum(serialize = "metrics.json:list")] + MetricsJsonList, + + /// Allows access to the stats endpoint. (`stats.json:list`) + #[serde(rename = "stats.json:list")] + #[strum(serialize = "stats.json:list")] + StatsJsonList, + + /// Allows access to the /debug endpoint. (`debug:list`) + #[serde(rename = "debug:list")] + #[strum(serialize = "debug:list")] + DebugList, + + /// Allows all operations. (`*`) + #[serde(rename = "*")] + #[strum(serialize = "*")] + All, +} diff --git a/typesense/tests/api/collection.rs b/typesense/tests/api/collection.rs index ecb4b2f..d11bfb0 100644 --- a/typesense/tests/api/collection.rs +++ b/typesense/tests/api/collection.rs @@ -1,136 +1,136 @@ -// #![allow(dead_code)] - -// use super::Config; -// use serde::{Deserialize, Serialize}; -// use typesense::document::Document; -// use typesense::Typesense; -// use typesense_codegen::apis::collections_api; -// use typesense_codegen::models::{CollectionResponse, CollectionSchema}; - -// #[derive(Typesense, Serialize, Deserialize)] -// #[typesense(collection_name = "companies", default_sorting_field = "num_employees")] -// struct Company { -// company_name: String, -// num_employees: i32, -// #[typesense(facet)] -// country: String, -// } - -// fn schema_to_resp(schema: CollectionSchema, resp: &CollectionResponse) -> CollectionResponse { -// CollectionResponse { -// name: schema.name, -// fields: schema.fields, -// default_sorting_field: schema.default_sorting_field, -// token_separators: schema.token_separators, -// enable_nested_fields: schema.enable_nested_fields, -// symbols_to_index: schema.symbols_to_index, -// num_documents: resp.num_documents, -// created_at: resp.created_at, -// } -// } - -// async fn create_collection() { -// let collection_schema_response = -// collections_api::create_collection(Config::get(), Company::collection_schema()) -// .await -// .unwrap(); - -// assert_eq!(collection_schema_response.num_documents, 0); -// assert_eq!( -// schema_to_resp(Company::collection_schema(), &collection_schema_response), -// collection_schema_response -// ); -// } - -// async fn get_collection() { -// let collection_schema_response = collections_api::get_collection(Config::get(), "companies") -// .await -// .unwrap(); - -// assert_eq!(collection_schema_response.num_documents, 1250); -// assert_eq!( -// schema_to_resp(Company::collection_schema(), &collection_schema_response), -// collection_schema_response -// ); -// } - -// async fn delete_collection() { -// let collection_schema_response = collections_api::delete_collection(Config::get(), "companies") -// .await -// .unwrap(); - -// assert_eq!(collection_schema_response.num_documents, 1200); -// assert_eq!( -// schema_to_resp(Company::collection_schema(), &collection_schema_response), -// collection_schema_response -// ); -// } - -// async fn get_collections() { -// 
let collection_schema_response = collections_api::get_collections(Config::get()) -// .await -// .unwrap(); - -// assert_eq!(collection_schema_response.len(), 2); -// } - -// #[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] -// mod tokio_test { -// use super::*; - -// #[tokio::test] -// async fn create_collection_tokio() { -// create_collection().await -// } - -// #[tokio::test] -// async fn get_collection_tokio() { -// get_collection().await -// } - -// #[tokio::test] -// async fn delete_collection_tokio() { -// delete_collection().await -// } - -// #[tokio::test] -// async fn get_collections_tokio() { -// get_collections().await -// } -// } - -// #[cfg(target_arch = "wasm32")] -// mod wasm_test { -// use super::*; -// use wasm_bindgen_test::wasm_bindgen_test; - -// wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); - -// #[wasm_bindgen_test] -// async fn create_collection_wasm() { -// console_error_panic_hook::set_once(); - -// create_collection().await -// } - -// #[wasm_bindgen_test] -// async fn get_collection_wasm() { -// console_error_panic_hook::set_once(); - -// get_collection().await -// } - -// #[wasm_bindgen_test] -// async fn delete_collection_wasm() { -// console_error_panic_hook::set_once(); - -// delete_collection().await -// } - -// #[wasm_bindgen_test] -// async fn get_collections_wasm() { -// console_error_panic_hook::set_once(); - -// get_collections().await -// } -// } +#![allow(dead_code)] + +use super::Config; +use serde::{Deserialize, Serialize}; +use typesense::document::Document; +use typesense::Typesense; +use typesense_codegen::apis::collections_api; +use typesense_codegen::models::{CollectionResponse, CollectionSchema}; + +#[derive(Typesense, Serialize, Deserialize)] +#[typesense(collection_name = "companies", default_sorting_field = "num_employees")] +struct Company { + company_name: String, + num_employees: i32, + #[typesense(facet)] + country: String, +} + +fn schema_to_resp(schema: CollectionSchema, resp: &CollectionResponse) -> CollectionResponse { + CollectionResponse { + name: schema.name, + fields: schema.fields, + default_sorting_field: schema.default_sorting_field, + token_separators: schema.token_separators, + enable_nested_fields: schema.enable_nested_fields, + symbols_to_index: schema.symbols_to_index, + num_documents: resp.num_documents, + created_at: resp.created_at, + } +} + +async fn create_collection() { + let collection_schema_response = + collections_api::create_collection(Config::get(), Company::collection_schema()) + .await + .unwrap(); + + assert_eq!(collection_schema_response.num_documents, 0); + assert_eq!( + schema_to_resp(Company::collection_schema(), &collection_schema_response), + collection_schema_response + ); +} + +async fn get_collection() { + let collection_schema_response = collections_api::get_collection(Config::get(), "companies") + .await + .unwrap(); + + assert_eq!(collection_schema_response.num_documents, 1250); + assert_eq!( + schema_to_resp(Company::collection_schema(), &collection_schema_response), + collection_schema_response + ); +} + +async fn delete_collection() { + let collection_schema_response = collections_api::delete_collection(Config::get(), "companies") + .await + .unwrap(); + + assert_eq!(collection_schema_response.num_documents, 1200); + assert_eq!( + schema_to_resp(Company::collection_schema(), &collection_schema_response), + collection_schema_response + ); +} + +async fn get_collections() { + let collection_schema_response = collections_api::get_collections(Config::get()) + 
.await + .unwrap(); + + assert_eq!(collection_schema_response.len(), 2); +} + +#[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn create_collection_tokio() { + create_collection().await + } + + #[tokio::test] + async fn get_collection_tokio() { + get_collection().await + } + + #[tokio::test] + async fn delete_collection_tokio() { + delete_collection().await + } + + #[tokio::test] + async fn get_collections_tokio() { + get_collections().await + } +} + +#[cfg(target_arch = "wasm32")] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn create_collection_wasm() { + console_error_panic_hook::set_once(); + + create_collection().await + } + + #[wasm_bindgen_test] + async fn get_collection_wasm() { + console_error_panic_hook::set_once(); + + get_collection().await + } + + #[wasm_bindgen_test] + async fn delete_collection_wasm() { + console_error_panic_hook::set_once(); + + delete_collection().await + } + + #[wasm_bindgen_test] + async fn get_collections_wasm() { + console_error_panic_hook::set_once(); + + get_collections().await + } +} diff --git a/typesense/tests/api/documents.rs b/typesense/tests/api/documents.rs index c8e8f5b..885d56d 100644 --- a/typesense/tests/api/documents.rs +++ b/typesense/tests/api/documents.rs @@ -1,111 +1,111 @@ -// #![allow(dead_code)] - -// use super::Config; -// use serde::{Deserialize, Serialize}; -// use typesense::document::Document; -// use typesense::models::SearchParameters; -// use typesense::Typesense; -// use typesense_codegen::apis::documents_api; - -// #[derive(Typesense, Serialize, Deserialize)] -// #[typesense(collection_name = "companies", default_sorting_field = "num_employees")] -// struct Company { -// company_name: String, -// num_employees: i32, -// #[typesense(facet)] -// country: String, -// } - -// async fn import_documents() { -// let documents = [ -// Company { -// company_name: "test".to_owned(), -// num_employees: 1, -// country: "c1".to_owned(), -// }, -// Company { -// company_name: "test2".to_owned(), -// num_employees: 2, -// country: "c2".to_owned(), -// }, -// ] -// .map(|c| serde_json::to_string(&c).unwrap()) -// .join("\n"); - -// let resp = documents_api::import_documents( -// Config::get(), -// &Company::collection_schema().name, -// documents, -// None, -// ) -// .await -// .unwrap(); - -// assert_eq!(&resp, "{\"success\":true}\n{\"success\":true}"); -// } - -// async fn search_collection() { -// let search = SearchParameters { -// q: "test".to_owned(), -// query_by: "company_name".to_owned(), -// ..Default::default() -// }; - -// let resp = documents_api::search_collection::( -// Config::get(), -// &Company::collection_schema().name, -// search, -// ) -// .await -// .unwrap(); - -// assert_eq!(resp.found, Some(2)); -// assert_eq!( -// resp.hits -// .unwrap() -// .first() -// .unwrap() -// .document -// .as_ref() -// .unwrap() -// .company_name, -// "test".to_owned() -// ); -// } - -// #[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] -// mod tokio_test { -// use super::*; - -// #[tokio::test] -// async fn import_documents_tokio() { -// import_documents().await -// } - -// #[tokio::test] -// async fn search_collection_tokio() { -// search_collection().await -// } -// } - -// #[cfg(target_arch = "wasm32")] -// mod wasm_test { -// use super::*; -// use wasm_bindgen_test::wasm_bindgen_test; - -// 
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); - -// #[wasm_bindgen_test] -// async fn import_documents_wasm() { -// console_error_panic_hook::set_once(); - -// import_documents().await -// } - -// #[wasm_bindgen_test] -// async fn search_collection_wasm() { -// console_error_panic_hook::set_once(); - -// search_collection().await -// } -// } +#![allow(dead_code)] + +use super::Config; +use serde::{Deserialize, Serialize}; +use typesense::document::Document; +use typesense::models::SearchParameters; +use typesense::Typesense; +use typesense_codegen::apis::documents_api; + +#[derive(Typesense, Serialize, Deserialize)] +#[typesense(collection_name = "companies", default_sorting_field = "num_employees")] +struct Company { + company_name: String, + num_employees: i32, + #[typesense(facet)] + country: String, +} + +async fn import_documents() { + let documents = [ + Company { + company_name: "test".to_owned(), + num_employees: 1, + country: "c1".to_owned(), + }, + Company { + company_name: "test2".to_owned(), + num_employees: 2, + country: "c2".to_owned(), + }, + ] + .map(|c| serde_json::to_string(&c).unwrap()) + .join("\n"); + + let resp = documents_api::import_documents( + Config::get(), + &Company::collection_schema().name, + documents, + None, + ) + .await + .unwrap(); + + assert_eq!(&resp, "{\"success\":true}\n{\"success\":true}"); +} + +async fn search_collection() { + let search = SearchParameters { + q: "test".to_owned(), + query_by: "company_name".to_owned(), + ..Default::default() + }; + + let resp = documents_api::search_collection::( + Config::get(), + &Company::collection_schema().name, + search, + ) + .await + .unwrap(); + + assert_eq!(resp.found, Some(2)); + assert_eq!( + resp.hits + .unwrap() + .first() + .unwrap() + .document + .as_ref() + .unwrap() + .company_name, + "test".to_owned() + ); +} + +#[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn import_documents_tokio() { + import_documents().await + } + + #[tokio::test] + async fn search_collection_tokio() { + search_collection().await + } +} + +#[cfg(target_arch = "wasm32")] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn import_documents_wasm() { + console_error_panic_hook::set_once(); + + import_documents().await + } + + #[wasm_bindgen_test] + async fn search_collection_wasm() { + console_error_panic_hook::set_once(); + + search_collection().await + } +} diff --git a/typesense/tests/client/aliases_test.rs b/typesense/tests/client/aliases_test.rs index edfbae3..ad7b2a9 100644 --- a/typesense/tests/client/aliases_test.rs +++ b/typesense/tests/client/aliases_test.rs @@ -2,8 +2,7 @@ use typesense::models::{CollectionAliasSchema, CollectionSchema, Field}; use super::{get_client, new_id}; -#[tokio::test] -async fn test_aliases_and_alias_lifecycle() { +async fn logic_test_aliases_and_alias_lifecycle() { let client = get_client(); let collection_name = new_id("products"); let alias_name = new_id("products_alias"); @@ -90,3 +89,27 @@ async fn test_aliases_and_alias_lifecycle() { "Failed to delete collection after alias test" ); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_aliases_and_alias_lifecycle() { + logic_test_aliases_and_alias_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use 
super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_aliases_and_alias_lifecycle() { + console_error_panic_hook::set_once(); + logic_test_aliases_and_alias_lifecycle().await; + } +} diff --git a/typesense/tests/client/analytics_test.rs b/typesense/tests/client/analytics_test.rs index 1854639..5393f68 100644 --- a/typesense/tests/client/analytics_test.rs +++ b/typesense/tests/client/analytics_test.rs @@ -6,8 +6,7 @@ use typesense::models::{ AnalyticsRulesType::Counter, CollectionSchema, Field, }; -#[tokio::test] -async fn test_analytics_rules_and_events_lifecycle() { +async fn logic_test_analytics_rules_and_events_lifecycle() { let client = get_client(); let rule_name_1 = new_id("product_clicks"); let collection_name = new_id("products"); @@ -120,3 +119,27 @@ async fn test_analytics_rules_and_events_lifecycle() { "Rule should not exist after deletion" ); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_analytics_rules_and_events_lifecycle() { + logic_test_analytics_rules_and_events_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_analytics_rules_and_events_lifecycle() { + console_error_panic_hook::set_once(); + logic_test_analytics_rules_and_events_lifecycle().await; + } +} diff --git a/typesense/tests/client/client_test.rs b/typesense/tests/client/client_test.rs index 1cfae11..931d3aa 100644 --- a/typesense/tests/client/client_test.rs +++ b/typesense/tests/client/client_test.rs @@ -1,3 +1,5 @@ +#![cfg(not(target_family = "wasm"))] + use reqwest::Url; use reqwest_retry::policies::ExponentialBackoff; use std::time::Duration; @@ -40,15 +42,16 @@ async fn setup_mock_server_404(server: &MockServer, collection_name: &str) { } // Helper function to create a client configuration for tests. 
-fn get_test_config(nodes: Vec, nearest_node: Option) -> MultiNodeConfiguration { - MultiNodeConfiguration { - nodes, - nearest_node, - api_key: "test-key".to_string(), - healthcheck_interval: Duration::from_secs(60), - retry_policy: ExponentialBackoff::builder().build_with_max_retries(0), - connection_timeout: Duration::from_secs(1), - } +fn get_client(nodes: Vec, nearest_node: Option) -> Client { + Client::builder() + .nodes(nodes) + .maybe_nearest_node(nearest_node) + .api_key("test-key") + .healthcheck_interval(Duration::from_secs(60)) + .retry_policy(ExponentialBackoff::builder().build_with_max_retries(0)) + .connection_timeout(Duration::from_secs(1)) + .build() + .expect("Failed to create client") } #[tokio::test] @@ -56,8 +59,7 @@ async fn test_success_on_first_node() { let server1 = MockServer::start().await; setup_mock_server_ok(&server1, "products").await; - let config = get_test_config(vec![Url::parse(&server1.uri()).unwrap()], None); - let client = Client::new(config).unwrap(); + let client = get_client(vec![Url::parse(&server1.uri()).unwrap()], None); let result = client.collection("products").retrieve().await; @@ -74,14 +76,13 @@ async fn test_failover_to_second_node() { setup_mock_server_503(&server1, "products").await; setup_mock_server_ok(&server2, "products").await; - let config = get_test_config( + let client = get_client( vec![ Url::parse(&server1.uri()).unwrap(), Url::parse(&server2.uri()).unwrap(), ], None, ); - let client = Client::new(config).unwrap(); let result = client.collection("products").retrieve().await; assert!(result.is_ok()); @@ -99,11 +100,10 @@ async fn test_nearest_node_is_prioritized() { setup_mock_server_ok(&nearest_server, "products").await; setup_mock_server_ok(®ular_server, "products").await; - let config = get_test_config( + let client = get_client( vec![Url::parse(®ular_server.uri()).unwrap()], Some(Url::parse(&nearest_server.uri()).unwrap()), ); - let client = Client::new(config).unwrap(); let result = client.collection("products").retrieve().await; assert!(result.is_ok()); @@ -120,11 +120,10 @@ async fn test_failover_from_nearest_to_regular_node() { setup_mock_server_503(&nearest_server, "products").await; setup_mock_server_ok(®ular_server, "products").await; - let config = get_test_config( + let client = get_client( vec![Url::parse(®ular_server.uri()).unwrap()], Some(Url::parse(&nearest_server.uri()).unwrap()), ); - let client = Client::new(config).unwrap(); let result = client.collection("products").retrieve().await; assert!(result.is_ok()); @@ -144,7 +143,7 @@ async fn test_round_robin_failover() { setup_mock_server_503(&server2, "products").await; setup_mock_server_ok(&server3, "products").await; - let config = get_test_config( + let client = get_client( vec![ Url::parse(&server1.uri()).unwrap(), Url::parse(&server2.uri()).unwrap(), @@ -152,7 +151,6 @@ async fn test_round_robin_failover() { ], None, ); - let client = Client::new(config).unwrap(); // First request should fail over to the third node let result = client.collection("products").retrieve().await; @@ -191,16 +189,17 @@ async fn test_health_check_and_node_recovery() { setup_mock_server_503(&server1, "products").await; setup_mock_server_ok(&server2, "products").await; - let mut config = get_test_config( - vec![ + let client = Client::builder() + .nodes(vec![ Url::parse(&server1.uri()).unwrap(), Url::parse(&server2.uri()).unwrap(), - ], - None, - ); - // Use a very short healthcheck interval for the test - config.healthcheck_interval = Duration::from_millis(500); - let client = 
Client::new(config).unwrap(); + ]) + .api_key("test-key") + .healthcheck_interval(Duration::from_millis(500)) // Use a very short healthcheck interval for the test + .retry_policy(ExponentialBackoff::builder().build_with_max_retries(0)) + .connection_timeout(Duration::from_secs(1)) + .build() + .expect("Failed to create client"); // 1. First request fails over to server2, marking server1 as unhealthy. assert!(client.collection("products").retrieve().await.is_ok()); @@ -232,14 +231,13 @@ async fn test_all_nodes_fail() { setup_mock_server_503(&server1, "products").await; setup_mock_server_503(&server2, "products").await; - let config = get_test_config( + let client = get_client( vec![ Url::parse(&server1.uri()).unwrap(), Url::parse(&server2.uri()).unwrap(), ], None, ); - let client = Client::new(config).unwrap(); let result = client.collection("products").retrieve().await; assert!(result.is_err()); @@ -262,14 +260,13 @@ async fn test_fail_fast_on_non_retriable_error() { setup_mock_server_404(&server1, "products").await; setup_mock_server_ok(&server2, "products").await; - let config = get_test_config( + let client = get_client( vec![ Url::parse(&server1.uri()).unwrap(), Url::parse(&server2.uri()).unwrap(), ], None, ); - let client = Client::new(config).unwrap(); let result = client.collection("products").retrieve().await; assert!(result.is_err()); @@ -299,7 +296,7 @@ async fn test_load_balancing_with_healthy_nodes() { setup_mock_server_ok(&server3, "products").await; // 2. Setup client with the three nodes - let config = get_test_config( + let client = get_client( vec![ Url::parse(&server1.uri()).unwrap(), Url::parse(&server2.uri()).unwrap(), @@ -307,7 +304,6 @@ async fn test_load_balancing_with_healthy_nodes() { ], None, ); - let client = Client::new(config).unwrap(); // 3. 
Make three consecutive requests let result1 = client.collection("products").retrieve().await; diff --git a/typesense/tests/client/collections_test.rs b/typesense/tests/client/collections_test.rs index e9e374c..34a5fec 100644 --- a/typesense/tests/client/collections_test.rs +++ b/typesense/tests/client/collections_test.rs @@ -2,8 +2,7 @@ use typesense::models::{CollectionSchema, CollectionUpdateSchema, Field}; use super::{get_client, new_id}; -#[tokio::test] -async fn test_collections_and_collection_lifecycle() { +async fn logic_test_collections_and_collection_lifecycle() { let client = get_client(); let collection_name = new_id("products"); @@ -131,3 +130,27 @@ async fn test_collections_and_collection_lifecycle() { "Collection should not exist after deletion" ); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_collections_and_collection_lifecycle() { + logic_test_collections_and_collection_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_collections_and_collection_lifecycle() { + console_error_panic_hook::set_once(); + logic_test_collections_and_collection_lifecycle().await; + } +} diff --git a/typesense/tests/client/conversation_models_test.rs b/typesense/tests/client/conversation_models_test.rs index 5e58fca..690e9ed 100644 --- a/typesense/tests/client/conversation_models_test.rs +++ b/typesense/tests/client/conversation_models_test.rs @@ -1,3 +1,5 @@ +#![cfg(not(target_arch = "wasm32"))] + use std::time::Duration; use reqwest_retry::policies::ExponentialBackoff; @@ -5,7 +7,7 @@ use typesense::{ models::{ CollectionSchema, ConversationModelCreateSchema, ConversationModelUpdateSchema, Field, }, - Error as TypesenseError, MultiNodeConfiguration, + Error as TypesenseError, }; use super::{get_client, new_id}; @@ -117,16 +119,14 @@ use wiremock::{ // Helper to create a Typesense client configured for a mock server. 
fn get_test_client(uri: &str) -> Client { - let config = MultiNodeConfiguration { - nodes: vec![uri.parse().unwrap()], - nearest_node: None, // Not needed for single-node tests - api_key: "TEST_API_KEY".to_string(), - // Keep other settings minimal for testing - healthcheck_interval: Duration::from_secs(60), - retry_policy: ExponentialBackoff::builder().build_with_max_retries(0), - connection_timeout: Duration::from_secs(1), - }; - Client::new(config).unwrap() + Client::builder() + .nodes(vec![uri.parse().unwrap()]) + .api_key("TEST_API_KEY") + .healthcheck_interval(Duration::from_secs(60)) + .retry_policy(ExponentialBackoff::builder().build_with_max_retries(0)) + .connection_timeout(Duration::from_secs(1)) + .build() + .expect("Failed to create client") } #[tokio::test] diff --git a/typesense/tests/client/derive_integration_test.rs b/typesense/tests/client/derive_integration_test.rs index 7f41b0e..caf9de5 100644 --- a/typesense/tests/client/derive_integration_test.rs +++ b/typesense/tests/client/derive_integration_test.rs @@ -80,8 +80,7 @@ struct MegaProduct { tags: Option>, } -#[tokio::test] -async fn test_derive_macro_with_generic_client_lifecycle() { +async fn logic_test_derive_macro_with_generic_client_lifecycle() { let client = get_client(); let collection_name = new_id("mega_products_test"); @@ -460,8 +459,7 @@ struct ManualFlattenedProduct { details_weight_kg: f32, } -#[tokio::test] -async fn test_manual_flattening_lifecycle() { +async fn logic_test_manual_flattening_lifecycle() { let client = get_client(); let collection_name = new_id("manual_flat_test"); @@ -536,3 +534,38 @@ async fn test_manual_flattening_lifecycle() { "Should find document by indexed flattened field" ); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_derive_macro_with_generic_client_lifecycle() { + logic_test_derive_macro_with_generic_client_lifecycle().await; + } + + #[tokio::test] + async fn test_manual_flattening_lifecycle() { + logic_test_manual_flattening_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_derive_macro_with_generic_client_lifecycle() { + console_error_panic_hook::set_once(); + logic_test_derive_macro_with_generic_client_lifecycle().await; + } + + #[wasm_bindgen_test] + async fn test_manual_flattening_lifecycle() { + console_error_panic_hook::set_once(); + logic_test_manual_flattening_lifecycle().await; + } +} diff --git a/typesense/tests/client/documents_test.rs b/typesense/tests/client/documents_test.rs index 80673bb..c0a5e87 100644 --- a/typesense/tests/client/documents_test.rs +++ b/typesense/tests/client/documents_test.rs @@ -8,8 +8,7 @@ use typesense::models::{ use super::{get_client, new_id}; -#[tokio::test] -async fn test_document_lifecycle() { +async fn run_test_document_lifecycle() { let client = get_client(); let collection_name = new_id("books"); @@ -205,8 +204,7 @@ struct Book { in_stock: Option, } -#[tokio::test] -async fn test_generic_document_lifecycle() { +async fn run_test_generic_document_lifecycle() { let client = get_client(); let collection_name = new_id("generic_books"); @@ -341,3 +339,37 @@ async fn test_generic_document_lifecycle() { "Typed document should not exist after deletion" ); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + 
async fn test_document_lifecycle() { + run_test_document_lifecycle().await; + } + #[tokio::test] + async fn test_generic_document_lifecycle() { + run_test_generic_document_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_document_lifecycle() { + console_error_panic_hook::set_once(); + run_test_document_lifecycle().await; + } + + #[wasm_bindgen_test] + async fn test_generic_document_lifecycle() { + console_error_panic_hook::set_once(); + run_test_generic_document_lifecycle().await; + } +} diff --git a/typesense/tests/client/keys_test.rs b/typesense/tests/client/keys_test.rs index e56ccc3..95683ac 100644 --- a/typesense/tests/client/keys_test.rs +++ b/typesense/tests/client/keys_test.rs @@ -1,8 +1,7 @@ use super::get_client; use typesense::models::{ApiKeySchema, ScopedKeyParameters, SearchParameters}; -#[tokio::test] -async fn test_keys_lifecycle() { +async fn run_test_keys_lifecycle() { let client = get_client(); let key_description = "A test search-only key."; @@ -119,3 +118,27 @@ fn test_generate_scoped_search_key_with_example_values() { "The generated key does not match the expected key." ); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_keys_lifecycle() { + run_test_keys_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_keys_lifecycle() { + console_error_panic_hook::set_once(); + run_test_keys_lifecycle().await; + } +} diff --git a/typesense/tests/client/mod.rs b/typesense/tests/client/mod.rs index d32f739..8befdb6 100644 --- a/typesense/tests/client/mod.rs +++ b/typesense/tests/client/mod.rs @@ -16,20 +16,19 @@ mod synonyms_test; use reqwest::Url; use reqwest_retry::policies::ExponentialBackoff; use std::time::Duration; -use std::time::{SystemTime, UNIX_EPOCH}; -use typesense::{Client, MultiNodeConfiguration}; +use typesense::Client; +use web_time::{SystemTime, UNIX_EPOCH}; /// Helper function to create a new client for all tests in this suite. 
pub fn get_client() -> Client { - let config = MultiNodeConfiguration { - nodes: vec![Url::parse("http://localhost:8108").unwrap()], - nearest_node: None, - api_key: "xyz".to_string(), - healthcheck_interval: Duration::from_secs(5), - retry_policy: ExponentialBackoff::builder().build_with_max_retries(1), - connection_timeout: Duration::from_secs(3), - }; - Client::new(config).unwrap() + Client::builder() + .nodes(vec![Url::parse("http://localhost:8108").unwrap()]) + .api_key("xyz") + .healthcheck_interval(Duration::from_secs(5)) + .retry_policy(ExponentialBackoff::builder().build_with_max_retries(1)) + .connection_timeout(Duration::from_secs(3)) + .build() + .expect("Failed to create Typesense client") } /// Generates a unique name for a test resource by combining a prefix, diff --git a/typesense/tests/client/multi_search_test.rs b/typesense/tests/client/multi_search_test.rs index 1d2da59..9489dee 100644 --- a/typesense/tests/client/multi_search_test.rs +++ b/typesense/tests/client/multi_search_test.rs @@ -83,8 +83,7 @@ async fn setup_multi_search_tests( .unwrap(); } -#[tokio::test] -async fn test_multi_search_federated() { +async fn run_test_multi_search_federated() { let client = get_client(); let products_collection_name = new_id("products"); let brands_collection_name = new_id("brands"); @@ -152,8 +151,7 @@ async fn test_multi_search_federated() { ); } -#[tokio::test] -async fn test_multi_search_with_common_params() { +async fn run_test_multi_search_with_common_params() { let client = get_client(); let products_collection_name = new_id("products_common"); let brands_collection_name = new_id("brands_common"); @@ -239,8 +237,7 @@ struct Brand { country: String, } -#[tokio::test] -async fn test_multi_search_generic_parsing() { +async fn run_test_multi_search_generic_parsing() { let client = get_client(); let products_collection_name = new_id("products_generic"); let brands_collection_name = new_id("brands_generic"); @@ -335,8 +332,7 @@ async fn test_multi_search_generic_parsing() { ); } -#[tokio::test] -async fn test_multi_search_union_heterogeneous() { +async fn run_test_multi_search_union_heterogeneous() { let client = get_client(); let products_collection_name = new_id("products_union"); let brands_collection_name = new_id("brands_union"); @@ -417,8 +413,7 @@ async fn test_multi_search_union_heterogeneous() { ); } -#[tokio::test] -async fn test_multi_search_union_homogeneous_and_typed_conversion() { +async fn run_test_multi_search_union_homogeneous_and_typed_conversion() { let client = get_client(); let products_collection_name = new_id("products_union_homo"); // We only need one collection for this test, but the setup creates two. 
@@ -474,3 +469,63 @@ async fn test_multi_search_union_homogeneous_and_typed_conversion() { assert_eq!(macbook.name, "MacBook Pro"); assert_eq!(macbook.price, 1999); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_multi_search_federated() { + run_test_multi_search_federated().await; + } + #[tokio::test] + async fn test_multi_search_with_common_params() { + run_test_multi_search_with_common_params().await; + } + #[tokio::test] + async fn test_multi_search_generic_parsing() { + run_test_multi_search_generic_parsing().await; + } + #[tokio::test] + async fn test_multi_search_union_heterogeneous() { + run_test_multi_search_union_heterogeneous().await; + } + #[tokio::test] + async fn test_multi_search_union_homogeneous_and_typed_conversion() { + run_test_multi_search_union_homogeneous_and_typed_conversion().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_multi_search_federated() { + console_error_panic_hook::set_once(); + run_test_multi_search_federated().await; + } + #[wasm_bindgen_test] + async fn test_multi_search_with_common_params() { + console_error_panic_hook::set_once(); + run_test_multi_search_with_common_params().await; + } + #[wasm_bindgen_test] + async fn test_multi_search_generic_parsing() { + console_error_panic_hook::set_once(); + run_test_multi_search_generic_parsing().await; + } + #[wasm_bindgen_test] + async fn test_multi_search_union_heterogeneous() { + console_error_panic_hook::set_once(); + run_test_multi_search_union_heterogeneous().await; + } + #[wasm_bindgen_test] + async fn test_multi_search_union_homogeneous_and_typed_conversion() { + console_error_panic_hook::set_once(); + run_test_multi_search_union_homogeneous_and_typed_conversion().await; + } +} diff --git a/typesense/tests/client/presets_test.rs b/typesense/tests/client/presets_test.rs index ec4b492..c653ce7 100644 --- a/typesense/tests/client/presets_test.rs +++ b/typesense/tests/client/presets_test.rs @@ -4,8 +4,7 @@ use typesense::models::{ use super::{get_client, new_id}; -#[tokio::test] -async fn test_presets_lifecycle() { +async fn run_test_presets_lifecycle() { let client = get_client(); let preset_id = new_id("search-preset"); @@ -84,3 +83,27 @@ async fn test_presets_lifecycle() { "Preset should not exist after deletion." 
); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_presets_lifecycle() { + run_test_presets_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_presets_lifecycle() { + console_error_panic_hook::set_once(); + run_test_presets_lifecycle().await; + } +} diff --git a/typesense/tests/client/search_overrides_test.rs b/typesense/tests/client/search_overrides_test.rs index 8b61a0b..661c228 100644 --- a/typesense/tests/client/search_overrides_test.rs +++ b/typesense/tests/client/search_overrides_test.rs @@ -4,8 +4,7 @@ use typesense::models::{ use super::{get_client, new_id}; -#[tokio::test] -async fn test_search_overrides_lifecycle() { +async fn run_test_search_overrides_lifecycle() { let client = get_client(); let collection_name = new_id("products"); let override_id = new_id("promo_products"); @@ -118,3 +117,27 @@ async fn test_search_overrides_lifecycle() { "Failed to delete collection after test." ); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_search_overrides_lifecycle() { + run_test_search_overrides_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_search_overrides_lifecycle() { + console_error_panic_hook::set_once(); + run_test_search_overrides_lifecycle().await; + } +} diff --git a/typesense/tests/client/stemming_dictionaries_test.rs b/typesense/tests/client/stemming_dictionaries_test.rs index 60b0d91..bcde457 100644 --- a/typesense/tests/client/stemming_dictionaries_test.rs +++ b/typesense/tests/client/stemming_dictionaries_test.rs @@ -1,7 +1,6 @@ use crate::{get_client, new_id}; -#[tokio::test] -async fn test_stemming_dictionary_import_and_retrieve() { +async fn run_test_stemming_dictionary_import_and_retrieve() { let client = get_client(); let dictionary_id = new_id("verb_stems_v2"); @@ -71,3 +70,27 @@ async fn test_stemming_dictionary_import_and_retrieve() { "The newly imported dictionary's ID was not found in the master list." 
); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_stemming_dictionary_import_and_retrieve() { + run_test_stemming_dictionary_import_and_retrieve().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_stemming_dictionary_import_and_retrieve() { + console_error_panic_hook::set_once(); + run_test_stemming_dictionary_import_and_retrieve().await; + } +} diff --git a/typesense/tests/client/stopwords_test.rs b/typesense/tests/client/stopwords_test.rs index da863f4..6e64c10 100644 --- a/typesense/tests/client/stopwords_test.rs +++ b/typesense/tests/client/stopwords_test.rs @@ -2,8 +2,7 @@ use typesense::models::StopwordsSetUpsertSchema; use super::{get_client, new_id}; -#[tokio::test] -async fn test_stopwords_and_stopword_lifecycle() { +async fn run_test_stopwords_and_stopword_lifecycle() { let client = get_client(); let set_id = new_id("custom_stopwords"); @@ -57,3 +56,27 @@ async fn test_stopwords_and_stopword_lifecycle() { "Stopwords set should not exist after deletion" ); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_stopwords_and_stopword_lifecycle() { + run_test_stopwords_and_stopword_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_stopwords_and_stopword_lifecycle() { + console_error_panic_hook::set_once(); + run_test_stopwords_and_stopword_lifecycle().await; + } +} diff --git a/typesense/tests/client/synonyms_test.rs b/typesense/tests/client/synonyms_test.rs index d021413..aa874ce 100644 --- a/typesense/tests/client/synonyms_test.rs +++ b/typesense/tests/client/synonyms_test.rs @@ -2,8 +2,7 @@ use typesense::models::{CollectionSchema, Field, SearchSynonymSchema}; use super::{get_client, new_id}; -#[tokio::test] -async fn test_synonyms_lifecycle() { +async fn run_test_synonyms_lifecycle() { let client = get_client(); let collection_name = new_id("products"); let synonym_id = new_id("synonym-123"); @@ -110,3 +109,27 @@ async fn test_synonyms_lifecycle() { "Failed to delete collection after synonym test" ); } + +#[cfg(all(test, not(target_arch = "wasm32")))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_synonyms_lifecycle() { + run_test_synonyms_lifecycle().await; + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_synonyms_lifecycle() { + console_error_panic_hook::set_once(); + run_test_synonyms_lifecycle().await; + } +} diff --git a/typesense_codegen/.openapi-generator-ignore b/typesense_codegen/.openapi-generator-ignore index 0161386..ad5edb3 100644 --- a/typesense_codegen/.openapi-generator-ignore +++ b/typesense_codegen/.openapi-generator-ignore @@ -22,4 +22,6 @@ # Then explicitly reverse the ignore rule for a single file: #!docs/README.md -Cargo.toml \ No newline at end of file +Cargo.toml +src/apis/configuration.rs +src/apis/mod.rs \ No newline at end of file diff --git a/typesense_codegen/Cargo.toml b/typesense_codegen/Cargo.toml index 
8984424..a039c69 100644 --- a/typesense_codegen/Cargo.toml +++ b/typesense_codegen/Cargo.toml @@ -11,8 +11,11 @@ serde = { version = "^1.0", features = ["derive"] } serde_json = "^1.0" serde_repr = "^0.1" url = "^2.5" -reqwest = { version = "^0.12", default-features = false, features = ["json", "multipart"] } -reqwest-middleware = { version = "^0.4", features = ["json", "multipart"] } +# used in both targets by the generator +reqwest = { version = "0.12", default-features = false, features = ["json"] } +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +reqwest-middleware = { version = "0.4", features = ["json"] } + [features] default = ["native-tls"] native-tls = ["reqwest/native-tls"] diff --git a/typesense_codegen/src/apis/configuration.rs b/typesense_codegen/src/apis/configuration.rs index 8d3c1c1..d433e31 100644 --- a/typesense_codegen/src/apis/configuration.rs +++ b/typesense_codegen/src/apis/configuration.rs @@ -4,17 +4,22 @@ * An open source search engine for building delightful search experiences. * * The version of the OpenAPI document: 28.0 - * + * * Generated by: https://openapi-generator.tech */ +// This file was added to .openapi-generator-ignore so we can freely modify it. +#[cfg(target_arch = "wasm32")] +pub type HttpClient = reqwest::Client; +#[cfg(not(target_arch = "wasm32"))] +pub type HttpClient = reqwest_middleware::ClientWithMiddleware; #[derive(Debug, Clone)] pub struct Configuration { pub base_path: String, pub user_agent: Option, - pub client: reqwest_middleware::ClientWithMiddleware, + pub client: HttpClient, pub basic_auth: Option, pub oauth_access_token: Option, pub bearer_access_token: Option, @@ -29,7 +34,6 @@ pub struct ApiKey { pub key: String, } - impl Configuration { pub fn new() -> Configuration { Configuration::default() @@ -38,10 +42,15 @@ impl Configuration { impl Default for Configuration { fn default() -> Self { + #[cfg(target_arch = "wasm32")] + let client = reqwest::Client::new(); + + #[cfg(not(target_arch = "wasm32"))] + let client = reqwest_middleware::ClientBuilder::new(reqwest::Client::new()).build(); Configuration { base_path: "http://localhost".to_owned(), user_agent: Some("OpenAPI-Generator/28.0/rust".to_owned()), - client: reqwest_middleware::ClientBuilder::new(reqwest::Client::new()).build(), + client, basic_auth: None, oauth_access_token: None, bearer_access_token: None, diff --git a/typesense_codegen/src/apis/mod.rs b/typesense_codegen/src/apis/mod.rs index 070f27a..85b8f9e 100644 --- a/typesense_codegen/src/apis/mod.rs +++ b/typesense_codegen/src/apis/mod.rs @@ -11,16 +11,18 @@ pub struct ResponseContent { #[derive(Debug)] pub enum Error { Reqwest(reqwest::Error), + #[cfg(not(target_arch = "wasm32"))] ReqwestMiddleware(reqwest_middleware::Error), Serde(serde_json::Error), Io(std::io::Error), ResponseError(ResponseContent), } -impl fmt::Display for Error { +impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (module, e) = match self { Error::Reqwest(e) => ("reqwest", e.to_string()), + #[cfg(not(target_arch = "wasm32"))] Error::ReqwestMiddleware(e) => ("reqwest-middleware", e.to_string()), Error::Serde(e) => ("serde", e.to_string()), Error::Io(e) => ("IO", e.to_string()), @@ -30,10 +32,11 @@ impl fmt::Display for Error { } } -impl error::Error for Error { +impl error::Error for Error { fn source(&self) -> Option<&(dyn error::Error + 'static)> { Some(match self { Error::Reqwest(e) => e, + #[cfg(not(target_arch = "wasm32"))] Error::ReqwestMiddleware(e) => e, Error::Serde(e) => e, Error::Io(e) 
=> e, @@ -42,25 +45,26 @@ impl error::Error for Error { } } -impl From for Error { +impl From for Error { fn from(e: reqwest::Error) -> Self { Error::Reqwest(e) } } +#[cfg(not(target_arch = "wasm32"))] impl From for Error { fn from(e: reqwest_middleware::Error) -> Self { Error::ReqwestMiddleware(e) } } -impl From for Error { +impl From for Error { fn from(e: serde_json::Error) -> Self { Error::Serde(e) } } -impl From for Error { +impl From for Error { fn from(e: std::io::Error) -> Self { Error::Io(e) } @@ -87,8 +91,10 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String value, )); } - }, - serde_json::Value::String(s) => params.push((format!("{}[{}]", prefix, key), s.clone())), + } + serde_json::Value::String(s) => { + params.push((format!("{}[{}]", prefix, key), s.clone())) + } _ => params.push((format!("{}[{}]", prefix, key), value.to_string())), } } @@ -105,7 +111,7 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String enum ContentType { Json, Text, - Unsupported(String) + Unsupported(String), } impl From<&str> for ContentType { From 89a2e48b689ec551f731b930d0abeff385244e39 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Wed, 20 Aug 2025 23:01:41 +0700 Subject: [PATCH 13/21] fix: merge conflicts --- typesense/Cargo.toml | 5 ----- typesense_derive/src/field_attrs.rs | 14 ++++++++++---- typesense_derive/src/lib.rs | 10 +++++----- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/typesense/Cargo.toml b/typesense/Cargo.toml index 23cdd74..d55244e 100644 --- a/typesense/Cargo.toml +++ b/typesense/Cargo.toml @@ -1,10 +1,5 @@ [package] name = "typesense" -version = "0.1.0" -authors = ["Typesense "] -edition = "2021" -license = "Apache-2.0" -description = "WIP client for typesense" version.workspace = true authors.workspace = true repository.workspace = true diff --git a/typesense_derive/src/field_attrs.rs b/typesense_derive/src/field_attrs.rs index 9ba96a3..66527b3 100644 --- a/typesense_derive/src/field_attrs.rs +++ b/typesense_derive/src/field_attrs.rs @@ -1,7 +1,7 @@ use crate::{skip_eq, string_literal}; use proc_macro2::TokenTree; use quote::quote; -use syn::{spanned::Spanned, Attribute, Field}; +use syn::{Attribute, Field, spanned::Spanned}; #[derive(Default)] struct FieldAttrs { @@ -191,7 +191,13 @@ fn extract_field_attrs(attrs: &[Attribute]) -> syn::Result { // --- Flags that are ONLY shorthand --- "flatten" | "skip" => { if !is_shorthand { - return Err(syn::Error::new(i.span(), format!("`{}` is a flag and does not take a value. Use `#[typesense({})]`", ident_str, ident_str))); + return Err(syn::Error::new( + i.span(), + format!( + "`{}` is a flag and does not take a value. 
Use `#[typesense({})]`", + ident_str, ident_str + ), + )); } match ident_str.as_str() { "flatten" => { @@ -291,13 +297,13 @@ fn extract_field_attrs(attrs: &[Attribute]) -> syn::Result { // Get the inner type for a given wrappper fn ty_inner_type<'a>(ty: &'a syn::Type, wrapper: &'static str) -> Option<&'a syn::Type> { - if let syn::Type::Path(ref p) = ty { + if let syn::Type::Path(p) = ty { if p.path.segments.len() == 1 && p.path.segments[0].ident == wrapper { if let syn::PathArguments::AngleBracketed(ref inner_ty) = p.path.segments[0].arguments { if inner_ty.args.len() == 1 { // len is 1 so this should not fail let inner_ty = inner_ty.args.first().unwrap(); - if let syn::GenericArgument::Type(ref t) = inner_ty { + if let syn::GenericArgument::Type(t) = inner_ty { return Some(t); } } diff --git a/typesense_derive/src/lib.rs b/typesense_derive/src/lib.rs index fc1d9a3..5620f39 100644 --- a/typesense_derive/src/lib.rs +++ b/typesense_derive/src/lib.rs @@ -1,7 +1,7 @@ use proc_macro::TokenStream; use proc_macro2::{Ident, TokenTree}; -use quote::{quote, ToTokens}; -use syn::{spanned::Spanned, Attribute, ItemStruct}; +use quote::{ToTokens, quote}; +use syn::{Attribute, ItemStruct, spanned::Spanned}; mod field_attrs; use field_attrs::process_field; @@ -99,7 +99,7 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { proc_macro2::TokenStream::new() }; - let gen = quote! { + let generated_code = quote! { impl #impl_generics typesense::prelude::Document for #ident #ty_generics #where_clause { fn collection_schema() -> typesense::models::CollectionSchema { let name = #collection_name.to_owned(); @@ -250,13 +250,13 @@ fn string_list(tt_iter: &mut impl Iterator) -> syn::Result { return Err(syn::Error::new( proc_macro2::Span::call_site(), "Expected a list in brackets `[]`", - )) + )); } }; From 963785e33557755f624e3f6f8f72ce8010c2c8f6 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Thu, 21 Aug 2025 10:26:04 +0700 Subject: [PATCH 14/21] remove old tests --- typesense/tests/api/collection.rs | 136 ------------------------------ typesense/tests/api/documents.rs | 111 ------------------------ typesense/tests/api/lib.rs | 41 --------- 3 files changed, 288 deletions(-) delete mode 100644 typesense/tests/api/collection.rs delete mode 100644 typesense/tests/api/documents.rs delete mode 100644 typesense/tests/api/lib.rs diff --git a/typesense/tests/api/collection.rs b/typesense/tests/api/collection.rs deleted file mode 100644 index 23016fd..0000000 --- a/typesense/tests/api/collection.rs +++ /dev/null @@ -1,136 +0,0 @@ -#![allow(dead_code)] - -use super::Config; -use serde::{Deserialize, Serialize}; -use typesense::Typesense; -use typesense::document::Document; -use typesense_codegen::apis::collections_api; -use typesense_codegen::models::{CollectionResponse, CollectionSchema}; - -#[derive(Typesense, Serialize, Deserialize)] -#[typesense(collection_name = "companies", default_sorting_field = "num_employees")] -struct Company { - company_name: String, - num_employees: i32, - #[typesense(facet)] - country: String, -} - -fn schema_to_resp(schema: CollectionSchema, resp: &CollectionResponse) -> CollectionResponse { - CollectionResponse { - name: schema.name, - fields: schema.fields, - default_sorting_field: schema.default_sorting_field, - token_separators: schema.token_separators, - enable_nested_fields: schema.enable_nested_fields, - symbols_to_index: schema.symbols_to_index, - num_documents: resp.num_documents, - created_at: resp.created_at, - } -} - -async fn create_collection() { - let 
collection_schema_response = - collections_api::create_collection(Config::get(), Company::collection_schema()) - .await - .unwrap(); - - assert_eq!(collection_schema_response.num_documents, 0); - assert_eq!( - schema_to_resp(Company::collection_schema(), &collection_schema_response), - collection_schema_response - ); -} - -async fn get_collection() { - let collection_schema_response = collections_api::get_collection(Config::get(), "companies") - .await - .unwrap(); - - assert_eq!(collection_schema_response.num_documents, 1250); - assert_eq!( - schema_to_resp(Company::collection_schema(), &collection_schema_response), - collection_schema_response - ); -} - -async fn delete_collection() { - let collection_schema_response = collections_api::delete_collection(Config::get(), "companies") - .await - .unwrap(); - - assert_eq!(collection_schema_response.num_documents, 1200); - assert_eq!( - schema_to_resp(Company::collection_schema(), &collection_schema_response), - collection_schema_response - ); -} - -async fn get_collections() { - let collection_schema_response = collections_api::get_collections(Config::get()) - .await - .unwrap(); - - assert_eq!(collection_schema_response.len(), 2); -} - -#[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] -mod tokio_test { - use super::*; - - #[tokio::test] - async fn create_collection_tokio() { - create_collection().await - } - - #[tokio::test] - async fn get_collection_tokio() { - get_collection().await - } - - #[tokio::test] - async fn delete_collection_tokio() { - delete_collection().await - } - - #[tokio::test] - async fn get_collections_tokio() { - get_collections().await - } -} - -#[cfg(target_arch = "wasm32")] -mod wasm_test { - use super::*; - use wasm_bindgen_test::wasm_bindgen_test; - - wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); - - #[wasm_bindgen_test] - async fn create_collection_wasm() { - console_error_panic_hook::set_once(); - - create_collection().await - } - - #[wasm_bindgen_test] - async fn get_collection_wasm() { - console_error_panic_hook::set_once(); - - get_collection().await - } - - #[wasm_bindgen_test] - async fn delete_collection_wasm() { - console_error_panic_hook::set_once(); - - delete_collection().await - } - - #[wasm_bindgen_test] - async fn get_collections_wasm() { - console_error_panic_hook::set_once(); - - get_collections().await - } -} diff --git a/typesense/tests/api/documents.rs b/typesense/tests/api/documents.rs deleted file mode 100644 index 8e3b01a..0000000 --- a/typesense/tests/api/documents.rs +++ /dev/null @@ -1,111 +0,0 @@ -#![allow(dead_code)] - -use super::Config; -use serde::{Deserialize, Serialize}; -use typesense::Typesense; -use typesense::document::Document; -use typesense::models::SearchParameters; -use typesense_codegen::apis::documents_api; - -#[derive(Typesense, Serialize, Deserialize)] -#[typesense(collection_name = "companies", default_sorting_field = "num_employees")] -struct Company { - company_name: String, - num_employees: i32, - #[typesense(facet)] - country: String, -} - -async fn import_documents() { - let documents = [ - Company { - company_name: "test".to_owned(), - num_employees: 1, - country: "c1".to_owned(), - }, - Company { - company_name: "test2".to_owned(), - num_employees: 2, - country: "c2".to_owned(), - }, - ] - .map(|c| serde_json::to_string(&c).unwrap()) - .join("\n"); - - let resp = documents_api::import_documents( - Config::get(), - &Company::collection_schema().name, - documents, - None, - ) - .await - .unwrap(); - - assert_eq!(&resp, 
"{\"success\":true}\n{\"success\":true}"); -} - -async fn search_collection() { - let search = SearchParameters { - q: "test".to_owned(), - query_by: "company_name".to_owned(), - ..Default::default() - }; - - let resp = documents_api::search_collection::( - Config::get(), - &Company::collection_schema().name, - search, - ) - .await - .unwrap(); - - assert_eq!(resp.found, Some(2)); - assert_eq!( - resp.hits - .unwrap() - .first() - .unwrap() - .document - .as_ref() - .unwrap() - .company_name, - "test".to_owned() - ); -} - -#[cfg(all(feature = "tokio_test", not(target_arch = "wasm32")))] -mod tokio_test { - use super::*; - - #[tokio::test] - async fn import_documents_tokio() { - import_documents().await - } - - #[tokio::test] - async fn search_collection_tokio() { - search_collection().await - } -} - -#[cfg(target_arch = "wasm32")] -mod wasm_test { - use super::*; - use wasm_bindgen_test::wasm_bindgen_test; - - wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); - - #[wasm_bindgen_test] - async fn import_documents_wasm() { - console_error_panic_hook::set_once(); - - import_documents().await - } - - #[wasm_bindgen_test] - async fn search_collection_wasm() { - console_error_panic_hook::set_once(); - - search_collection().await - } -} diff --git a/typesense/tests/api/lib.rs b/typesense/tests/api/lib.rs deleted file mode 100644 index b36f5a3..0000000 --- a/typesense/tests/api/lib.rs +++ /dev/null @@ -1,41 +0,0 @@ -// use std::sync::OnceLock; -// use typesense_codegen::apis::configuration::{ApiKey, Configuration}; - -// mod collection; -// mod documents; - -// static CONFIG: OnceLock = OnceLock::new(); - -// #[cfg(not(target_arch = "wasm32"))] -// fn init() -> Configuration { -// let _ = dotenvy::dotenv(); - -// let base_path = std::env::var("URL").expect("URL must be present in .env"); -// let key = std::env::var("API_KEY").expect("API_KEY must be present in .env"); - -// Configuration { -// base_path, -// api_key: Some(ApiKey { prefix: None, key }), -// ..Default::default() -// } -// } - -// #[cfg(target_arch = "wasm32")] -// fn init() -> Configuration { -// let base_path = "http://localhost:5000".to_owned(); -// let key = "VerySecretKey".to_owned(); - -// Configuration { -// base_path, -// api_key: Some(ApiKey { prefix: None, key }), -// ..Default::default() -// } -// } - -// pub struct Config; - -// impl Config { -// pub fn get() -> &'static Configuration { -// CONFIG.get_or_init(init) -// } -// } From 7c41be2e9bc0d3dffd8a95d6f46031fa96189fb1 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Thu, 21 Aug 2025 11:07:11 +0700 Subject: [PATCH 15/21] ci: replace httpmock with an actual typesense server --- .github/workflows/ci.yml | 43 ++++++++++++++++++++--------------- typesense/tests/client/mod.rs | 4 ++-- 2 files changed, 27 insertions(+), 20 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0356dc7..4e0fb4e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,26 +1,40 @@ name: CI on: + workflow_dispatch: push: branches: - main + paths-ignore: + - '**/*.md' pull_request: branches: - main + paths-ignore: + - '**/*.md' jobs: tests: - runs-on: "${{ matrix.platform.os }}-latest" + runs-on: '${{ matrix.platform.os }}-latest' strategy: matrix: - platform: [ - { os: "ubuntu", target: "x86_64-unknown-linux-gnu" }, - { os: "ubuntu", target: "wasm32-unknown-unknown" }, - ] - env: - # used to connect to httpmock - URL: "http://localhost:5000" - API_KEY: "VerySecretKey" + platform: + [ + { os: 'ubuntu', target: 
'x86_64-unknown-linux-gnu' }, + { os: 'ubuntu', target: 'wasm32-unknown-unknown' }, + ] + services: + typesense: + image: typesense/typesense:29.0 + ports: + - 8108:8108/tcp + volumes: + - /tmp/typesense-server-data:/data + env: + TYPESENSE_DATA_DIR: '/data' + TYPESENSE_API_KEY: 'xyz' + TYPESENSE_ENABLE_CORS: true + TYPESENSE_URL: 'http://localhost:8108' steps: - uses: actions/checkout@v4 - name: Cache .cargo and target @@ -38,21 +52,14 @@ jobs: target: ${{ matrix.platform.target }} profile: minimal default: true - - name: Install httpmock - uses: actions-rs/cargo@v1 - with: - command: install - args: --features standalone -- httpmock - - name: Run httpmock - run: httpmock --expose --mock-files-dir=./mocks & - name: Install test runner for wasm if: matrix.platform.target == 'wasm32-unknown-unknown' - run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh - name: Stable Build uses: actions-rs/cargo@v1 with: command: build - args: --all-features --target ${{ matrix.platform.target }} + args: --all-features --package typesense --target ${{ matrix.platform.target }} - name: Tests if: matrix.platform.target != 'wasm32-unknown-unknown' uses: actions-rs/cargo@v1 diff --git a/typesense/tests/client/mod.rs b/typesense/tests/client/mod.rs index 8befdb6..c6698d7 100644 --- a/typesense/tests/client/mod.rs +++ b/typesense/tests/client/mod.rs @@ -34,7 +34,7 @@ pub fn get_client() -> Client { /// Generates a unique name for a test resource by combining a prefix, /// a nanoid, and an optional suffix. /// e.g., "test_collection_aB1cD2eF_create" -pub fn new_id(suffix: &str) -> String { +pub fn new_id(prefix: &str) -> String { // Using nanoid for a short, URL-friendly, and collision-resistant random ID. // The default length of 21 is more than enough. We use 8 for conciseness. 
let random_part = nanoid::nanoid!(8); // e.g., "fX3a-b_1" @@ -46,5 +46,5 @@ pub fn new_id(suffix: &str) -> String { .unwrap() .as_millis(); - format!("test_{}_{}_{}", suffix, timestamp, random_part) + format!("test_{}_{}_{}", prefix, timestamp, random_part) } From d2510c8de7a876cb7d23e915c5f1028980646882 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Thu, 21 Aug 2025 11:10:45 +0700 Subject: [PATCH 16/21] configure lint ci trigger --- .github/workflows/lint.yml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 0527114..f5084e7 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,16 +1,23 @@ name: Lint checks on: + workflow_dispatch: push: - branches: [ main ] + branches: + - main + paths-ignore: + - '**/*.md' pull_request: - branches: [ main ] + branches: + - main + paths-ignore: + - '**/*.md' jobs: lint: runs-on: ubuntu-latest env: - TZ: "/usr/share/zoneinfo/your/location" + TZ: '/usr/share/zoneinfo/your/location' steps: - uses: actions/checkout@v4 - name: Cache .cargo and target From 54ad65d43dd4298e2c9822a7cfe3914ad833d127 Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Thu, 21 Aug 2025 11:57:29 +0700 Subject: [PATCH 17/21] fix lint, disable clippy & rustc elided_lifetimes_in_paths in typesense_codegen --- typesense/src/client/collection/document.rs | 8 +++----- typesense/src/client/collection/documents.rs | 5 +---- typesense/src/client/mod.rs | 2 +- typesense_codegen/.openapi-generator-ignore | 3 ++- typesense_codegen/Cargo.toml | 1 - typesense_codegen/src/lib.rs | 9 ++------- typesense_derive/src/field_attrs.rs | 20 ++++++++------------ typesense_derive/src/lib.rs | 12 +++++------- 8 files changed, 22 insertions(+), 38 deletions(-) diff --git a/typesense/src/client/collection/document.rs b/typesense/src/client/collection/document.rs index f5f682a..28a5d78 100644 --- a/typesense/src/client/collection/document.rs +++ b/typesense/src/client/collection/document.rs @@ -5,7 +5,7 @@ //! `client.collection::("books").document("123")` use crate::{Client, Error}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::{Serialize, de::DeserializeOwned}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, @@ -68,10 +68,8 @@ where /// The updated full document is returned. /// /// # Arguments - /// * `partial_document` - A serializable struct or a `serde_json::Value` containing the fields to update. - /// For example: `serde_json::json!({ "in_stock": false })`. - /// * `params` - An optional `DocumentIndexParameters` struct to specify additional - /// parameters, such as `dirty_values` which determines what Typesense should do when the type of a particular field being indexed does not match the previously inferred type for that field, or the one defined in the collection's schema. + /// * `partial_document` - A serializable struct or a `serde_json::Value` containing the fields to update. For example: `serde_json::json!({ "in_stock": false })`. + /// * `params` - An optional `DocumentIndexParameters` struct to specify additional parameters, such as `dirty_values` which determines what Typesense should do when the type of a particular field being indexed does not match the previously inferred type for that field, or the one defined in the collection's schema. /// /// # Returns /// A `Result` containing the full, updated document deserialized into `T`. 
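The doc comment above describes the partial-update flow on a typed document handle. Below is a minimal usage sketch, assuming the accessor shown in the module docs (client.collection::<T>(...).document(...)), an update method that takes the partial document plus an optional DocumentIndexParameters, and a hypothetical Book type; the field types and exact signatures are assumptions (the generics were not preserved in this excerpt), and errors are unwrapped for brevity as in the test suite.

use serde::{Deserialize, Serialize};
use serde_json::json;
use typesense::Typesense;

// Hypothetical document type; field types are assumptions for illustration only.
#[derive(Typesense, Serialize, Deserialize)]
struct Book {
    id: String,
    title: String,
    in_stock: Option<bool>,
}

// Hypothetical helper demonstrating the partial update described above.
async fn mark_out_of_stock(client: &typesense::Client) {
    let updated: Book = client
        .collection::<Book>("books")                  // typed collection handle
        .document("123")                              // typed document handle
        .update(&json!({ "in_stock": false }), None)  // None = default index parameters
        .await
        .unwrap();
    // The full, updated document is returned and deserialized into `Book`.
    assert_eq!(updated.in_stock, Some(false));
}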
diff --git a/typesense/src/client/collection/documents.rs b/typesense/src/client/collection/documents.rs index d36d6fa..9aaca7c 100644 --- a/typesense/src/client/collection/documents.rs +++ b/typesense/src/client/collection/documents.rs @@ -6,7 +6,7 @@ use crate::models::SearchResult; use crate::{Client, Error}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::{Serialize, de::DeserializeOwned}; use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, @@ -45,9 +45,6 @@ where } /// Indexes a document in the collection. - /// - - /// /// # Arguments /// * `document` - A `serde_json::Value` representing the document. /// * `action` - The indexing action to perform (e.g., "create", "upsert"). diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index ce7bba2..5c5aa22 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -355,7 +355,7 @@ impl Client { .expect("Failed to build reqwest client"), ) .with(RetryTransientMiddleware::new_with_policy( - self.retry_policy.clone(), + self.retry_policy, )) .build(); diff --git a/typesense_codegen/.openapi-generator-ignore b/typesense_codegen/.openapi-generator-ignore index ad5edb3..b2708db 100644 --- a/typesense_codegen/.openapi-generator-ignore +++ b/typesense_codegen/.openapi-generator-ignore @@ -24,4 +24,5 @@ Cargo.toml src/apis/configuration.rs -src/apis/mod.rs \ No newline at end of file +src/apis/mod.rs +src/lib.rs \ No newline at end of file diff --git a/typesense_codegen/Cargo.toml b/typesense_codegen/Cargo.toml index c517142..8eb72cb 100644 --- a/typesense_codegen/Cargo.toml +++ b/typesense_codegen/Cargo.toml @@ -12,7 +12,6 @@ serde = { version = "^1.0", features = ["derive"] } serde_json = "^1.0" serde_repr = "^0.1" url = "^2.5" -# used in both targets by the generator reqwest = { version = "0.12", default-features = false, features = ["json"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] reqwest-middleware = { version = "0.4", features = ["json"] } diff --git a/typesense_codegen/src/lib.rs b/typesense_codegen/src/lib.rs index e152062..f9a96b5 100644 --- a/typesense_codegen/src/lib.rs +++ b/typesense_codegen/src/lib.rs @@ -1,11 +1,6 @@ #![allow(unused_imports)] -#![allow(clippy::too_many_arguments)] - -extern crate serde_repr; -extern crate serde; -extern crate serde_json; -extern crate url; -extern crate reqwest; +#![allow(clippy::all)] +#![allow(elided_lifetimes_in_paths)] pub mod apis; pub mod models; diff --git a/typesense_derive/src/field_attrs.rs b/typesense_derive/src/field_attrs.rs index 66527b3..4cec43a 100644 --- a/typesense_derive/src/field_attrs.rs +++ b/typesense_derive/src/field_attrs.rs @@ -65,7 +65,7 @@ fn extract_field_attrs(attrs: &[Attribute]) -> syn::Result { // Find the single #[typesense] attribute, erroring if there are more than one. 
let all_ts_attrs: Vec<&Attribute> = attrs .iter() - .filter(|a| a.path.get_ident().map_or(false, |i| i == "typesense")) + .filter(|a| a.path.get_ident().is_some_and(|i| i == "typesense")) .collect(); // Check for duplicates and create a rich, multi-span error if found @@ -284,11 +284,10 @@ fn extract_field_attrs(attrs: &[Attribute]) -> syn::Result { } }; - if let Some(TokenTree::Punct(p)) = tt_iter.peek() { - if p.as_char() == ',' { + if let Some(TokenTree::Punct(p)) = tt_iter.peek() + && p.as_char() == ',' { tt_iter.next(); // Consume the comma } - } } } @@ -297,19 +296,16 @@ fn extract_field_attrs(attrs: &[Attribute]) -> syn::Result { // Get the inner type for a given wrappper fn ty_inner_type<'a>(ty: &'a syn::Type, wrapper: &'static str) -> Option<&'a syn::Type> { - if let syn::Type::Path(p) = ty { - if p.path.segments.len() == 1 && p.path.segments[0].ident == wrapper { - if let syn::PathArguments::AngleBracketed(ref inner_ty) = p.path.segments[0].arguments { - if inner_ty.args.len() == 1 { + if let syn::Type::Path(p) = ty + && p.path.segments.len() == 1 && p.path.segments[0].ident == wrapper + && let syn::PathArguments::AngleBracketed(ref inner_ty) = p.path.segments[0].arguments + && inner_ty.args.len() == 1 { // len is 1 so this should not fail let inner_ty = inner_ty.args.first().unwrap(); if let syn::GenericArgument::Type(t) = inner_ty { return Some(t); } } - } - } - } None } @@ -350,7 +346,7 @@ pub fn process_field(field: &Field) -> syn::Result { let inner_type = get_inner_type(&field.ty); let is_vec = ty_inner_type(&field.ty, "Vec").is_some() || ty_inner_type(&field.ty, "Option") - .map_or(false, |t| ty_inner_type(t, "Vec").is_some()); + .is_some_and(|t| ty_inner_type(t, "Vec").is_some()); Ok(quote! { { diff --git a/typesense_derive/src/lib.rs b/typesense_derive/src/lib.rs index 5620f39..d0ec62b 100644 --- a/typesense_derive/src/lib.rs +++ b/typesense_derive/src/lib.rs @@ -47,8 +47,8 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { } = extract_attrs(attrs)?; let collection_name = collection_name.unwrap_or_else(|| ident.to_string().to_lowercase()); - if let Some(ref sorting_field) = default_sorting_field { - if !fields.iter().any(|field| + if let Some(ref sorting_field) = default_sorting_field + && !fields.iter().any(|field| // At this point we are sure that this field is a named field. 
field.ident.as_ref().unwrap() == sorting_field) { @@ -59,12 +59,11 @@ fn impl_typesense_collection(item: ItemStruct) -> syn::Result { ), )); } - } // Use flat_map to handle fields that expand into multiple schema fields let typesense_fields = fields .iter() - .map(|field| process_field(field)) // process_field returns a Result + .map(process_field) // process_field returns a Result .collect::>>()?; let default_sorting_field = if let Some(v) = default_sorting_field { @@ -276,11 +275,10 @@ fn string_list(tt_iter: &mut impl Iterator) -> syn::Result Date: Thu, 21 Aug 2025 11:58:40 +0700 Subject: [PATCH 18/21] remove api tests --- typesense/Cargo.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/typesense/Cargo.toml b/typesense/Cargo.toml index d55244e..8f567cc 100644 --- a/typesense/Cargo.toml +++ b/typesense/Cargo.toml @@ -64,10 +64,6 @@ name = "derive_tests" path = "tests/derive/lib.rs" required-features = ["derive"] -[[test]] -name = "api_tests" -path = "tests/api/lib.rs" - [[test]] name = "client" path = "tests/client/mod.rs" \ No newline at end of file From c4424de5e137f3b743b219e39bebc3110516300b Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Thu, 21 Aug 2025 14:00:08 +0700 Subject: [PATCH 19/21] feat: add builders for search parameters and multi search parameters --- typesense/src/builders/collection_schema.rs | 8 +- typesense/src/builders/mod.rs | 19 +- .../multi_search_collection_parameters.rs | 265 +++++++++++++++++ .../src/builders/multi_search_parameters.rs | 260 ++++++++++++++++ .../multi_search_searches_parameters.rs | 127 ++++++++ typesense/src/builders/search_parameters.rs | 280 ++++++++++++++++++ typesense/src/client/mod.rs | 40 +-- 7 files changed, 963 insertions(+), 36 deletions(-) create mode 100644 typesense/src/builders/multi_search_collection_parameters.rs create mode 100644 typesense/src/builders/multi_search_parameters.rs create mode 100644 typesense/src/builders/multi_search_searches_parameters.rs create mode 100644 typesense/src/builders/search_parameters.rs diff --git a/typesense/src/builders/collection_schema.rs b/typesense/src/builders/collection_schema.rs index b2cb0dd..9480274 100644 --- a/typesense/src/builders/collection_schema.rs +++ b/typesense/src/builders/collection_schema.rs @@ -67,7 +67,7 @@ impl CollectionSchemaBuilder { /// Adds a single [`Field`] to the collection schema. /// /// This is a convenience method for pushing one field at a time. - pub fn field(mut self, field: Field) -> Self { + pub fn add_field(mut self, field: Field) -> Self { self.fields.push(field); self } @@ -75,7 +75,7 @@ impl CollectionSchemaBuilder { /// Adds multiple [`Field`] values to the collection schema. /// /// This is a convenience method for appending a slice of fields. 
- pub fn fields(mut self, fields: &[Field]) -> Self + pub fn add_fields(mut self, fields: &[Field]) -> Self where Field: Clone, { @@ -109,8 +109,8 @@ mod test { let collection: CollectionSchema = new_collection_schema("companies", fields.clone().to_vec()) - .fields(&fields) - .field(new_collection_field("size", "string".into()).build()) + .add_fields(&fields) + .add_field(new_collection_field("size", "string".into()).build()) .default_sorting_field("num_employees") .build(); diff --git a/typesense/src/builders/mod.rs b/typesense/src/builders/mod.rs index b6a0333..6749bf7 100644 --- a/typesense/src/builders/mod.rs +++ b/typesense/src/builders/mod.rs @@ -2,5 +2,20 @@ mod collection_field; mod collection_schema; -pub use collection_field::new_collection_field; -pub use collection_schema::new_collection_schema; +mod multi_search_collection_parameters; +mod multi_search_parameters; +mod multi_search_searches_parameters; +mod search_parameters; + +pub use collection_field::{FieldBuilder, new_collection_field}; +pub use collection_schema::{CollectionSchemaBuilder, new_collection_schema}; + +pub use search_parameters::{SearchParametersBuilder, new_search_parameters}; + +pub use multi_search_collection_parameters::{ + MultiSearchCollectionParametersBuilder, new_multi_search_collection_parameters, +}; +pub use multi_search_parameters::{MultiSearchParametersBuilder, new_multi_search_parameters}; +pub use multi_search_searches_parameters::{ + MultiSearchSearchesParameterBuilder, new_multi_search_searches_parameter, +}; diff --git a/typesense/src/builders/multi_search_collection_parameters.rs b/typesense/src/builders/multi_search_collection_parameters.rs new file mode 100644 index 0000000..5cad599 --- /dev/null +++ b/typesense/src/builders/multi_search_collection_parameters.rs @@ -0,0 +1,265 @@ +//! Module for the `MultiSearchCollectionParameters` builder. + +use crate::models::{DropTokensMode, MultiSearchCollectionParameters}; +use bon::builder; + +/// Creates a new [`MultiSearchCollectionParameters`] builder. +/// +/// This builder helps construct an individual search query for a multi-search request. +/// All parameters are optional. +#[builder( + // expose a public builder type and a public finish_fn + builder_type(name = MultiSearchCollectionParametersBuilder, vis = "pub"), + finish_fn(name = build, vis = "pub"), + // allow passing &str into String params + on(String, into) +)] +pub fn new_multi_search_collection_parameters( + /// The collection to search in. + collection: Option, + /// The query text to search for in the collection. + q: Option, + /// A list of `string` fields that should be queried against. + query_by: Option, + /// The relative weight to give each `query_by` field when ranking results. + query_by_weights: Option, + /// How the representative text match score is calculated. + text_match_type: Option, + /// Indicates if the last word in the query should be treated as a prefix. + prefix: Option, + /// Infix search configuration. Can be `off`, `always`, or `fallback`. + infix: Option, + /// Maximum number of extra symbols before a query token for infix searching. + max_extra_prefix: Option, + /// Maximum number of extra symbols after a query token for infix searching. + max_extra_suffix: Option, + /// Filter conditions for refining search results. + filter_by: Option, + /// A list of fields and their sort orders. + sort_by: Option, + /// A list of fields to facet by. + facet_by: Option, + /// Maximum number of facet values to be returned. 
+ max_facet_values: Option, + /// A query to filter facet values. + facet_query: Option, + /// The number of typographical errors (1 or 2) that would be tolerated. + num_typos: Option, + /// The page number to fetch. + page: Option, + /// Number of results to fetch per page. + per_page: Option, + /// Number of hits to fetch. + limit: Option, + /// The starting point of the result set. + offset: Option, + /// Fields to group results by. + group_by: Option, + /// Maximum number of hits to return for every group. + group_limit: Option, + /// Whether to group documents with null values in the `group_by` field. + group_missing_values: Option, + /// List of fields to include in the search result. + include_fields: Option, + /// List of fields to exclude from the search result. + exclude_fields: Option, + /// List of fields which should be highlighted fully. + highlight_full_fields: Option, + /// The number of tokens surrounding the highlighted text. + highlight_affix_num_tokens: Option, + /// The start tag for highlighted snippets. + highlight_start_tag: Option, + /// The end tag for highlighted snippets. + highlight_end_tag: Option, + /// Field values under this length will be fully highlighted. + snippet_threshold: Option, + /// Threshold for dropping query tokens to find more results. + drop_tokens_threshold: Option, + drop_tokens_mode: Option, + /// Threshold for trying more typos to find more results. + typo_tokens_threshold: Option, + /// Whether to enable typos on alphanumerical tokens. + enable_typos_for_alpha_numerical_tokens: Option, + /// Whether the `filter_by` condition applies to curated results. + filter_curated_hits: Option, + /// Whether to enable synonyms for the query. + enable_synonyms: Option, + /// Allow synonym resolution on word prefixes. + synonym_prefix: Option, + /// Number of typos allowed for synonym resolution. + synonym_num_typos: Option, + /// A list of records to unconditionally include at specific positions. + pinned_hits: Option, + /// A list of records to unconditionally hide from search results. + hidden_hits: Option, + /// Comma-separated list of tags to trigger curation rules. + override_tags: Option, + /// A list of custom fields that must be highlighted. + highlight_fields: Option, + /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same + pre_segmented_query: Option, + /// Search using a preset of search parameters. + preset: Option, + /// Whether to enable overrides for the query. + enable_overrides: Option, + /// Whether to prioritize an exact match. + prioritize_exact_match: Option, + /// Prioritize documents where query words appear earlier in the text. + prioritize_token_position: Option, + /// Prioritize documents where query words appear in more fields. + prioritize_num_matching_fields: Option, + /// Disable typos for numerical tokens. + enable_typos_for_numerical_tokens: Option, + /// Whether to perform an exhaustive search. + exhaustive_search: Option, + /// Search cutoff time in milliseconds. + search_cutoff_ms: Option, + /// Enable server-side caching of search results. + use_cache: Option, + /// The TTL for the search query cache. + cache_ttl: Option, + /// Minimum word length for 1-typo correction. + min_len_1typo: Option, + /// Minimum word length for 2-typo correction. + min_len_2typo: Option, + /// Vector query expression. 
+ vector_query: Option, + /// Timeout for fetching remote embeddings. + remote_embedding_timeout_ms: Option, + /// Number of retries for fetching remote embeddings. + remote_embedding_num_tries: Option, + /// The underlying faceting strategy to use. + facet_strategy: Option, + /// Name of the stopwords set to apply for this search. + stopwords: Option, + /// Nested facet fields whose parent object should be returned. + facet_return_parent: Option, + /// The base64 encoded audio file. + voice_query: Option, + /// Enable conversational search. + conversation: Option, + /// The ID of the Conversation Model to use. + conversation_model_id: Option, + /// The ID of a previous conversation to continue. + conversation_id: Option, + /// A separate search API key for this specific search. + x_typesense_api_key: Option, + /// When true, computes both text match and vector distance scores for all matches in hybrid search. + rerank_hybrid_matches: Option, +) -> MultiSearchCollectionParameters { + MultiSearchCollectionParameters { + collection, + q, + query_by, + query_by_weights, + text_match_type, + prefix, + infix, + max_extra_prefix, + max_extra_suffix, + filter_by, + sort_by, + facet_by, + max_facet_values, + facet_query, + num_typos, + page, + per_page, + limit, + offset, + group_by, + group_limit, + group_missing_values, + include_fields, + exclude_fields, + highlight_full_fields, + highlight_affix_num_tokens, + highlight_start_tag, + highlight_end_tag, + snippet_threshold, + drop_tokens_threshold, + drop_tokens_mode, + typo_tokens_threshold, + enable_typos_for_alpha_numerical_tokens, + filter_curated_hits, + enable_synonyms, + synonym_prefix, + synonym_num_typos, + pinned_hits, + hidden_hits, + override_tags, + highlight_fields, + pre_segmented_query, + preset, + enable_overrides, + prioritize_exact_match, + prioritize_token_position, + prioritize_num_matching_fields, + enable_typos_for_numerical_tokens, + exhaustive_search, + search_cutoff_ms, + use_cache, + cache_ttl, + min_len_1typo, + min_len_2typo, + vector_query, + remote_embedding_timeout_ms, + remote_embedding_num_tries, + facet_strategy, + stopwords, + facet_return_parent, + voice_query, + conversation, + conversation_model_id, + conversation_id, + x_typesense_api_key, + rerank_hybrid_matches, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::models::MultiSearchCollectionParameters; + + #[test] + fn test_multi_search_collection_parameters_builder_basic() { + let built = new_multi_search_collection_parameters() + .collection("products") + .q("phone") + .limit(10) + .build(); + + let expected = MultiSearchCollectionParameters { + collection: Some("products".to_string()), + q: Some("phone".to_string()), + limit: Some(10), + ..Default::default() + }; + + assert_eq!(built, expected); + } + + #[test] + fn test_multi_search_collection_parameters_builder_api_key() { + let built = new_multi_search_collection_parameters() + .collection("private_docs") + .x_typesense_api_key("specific_key_for_this_search") + .build(); + + let expected = MultiSearchCollectionParameters { + collection: Some("private_docs".to_string()), + x_typesense_api_key: Some("specific_key_for_this_search".to_string()), + ..Default::default() + }; + + assert_eq!(built, expected); + } + + #[test] + fn test_multi_search_collection_parameters_builder_defaults() { + let built = new_multi_search_collection_parameters().build(); + let expected = MultiSearchCollectionParameters::default(); + assert_eq!(built, expected); + } +} diff --git 
a/typesense/src/builders/multi_search_parameters.rs b/typesense/src/builders/multi_search_parameters.rs new file mode 100644 index 0000000..9375697 --- /dev/null +++ b/typesense/src/builders/multi_search_parameters.rs @@ -0,0 +1,260 @@ +//! Module for the `MultiSearchParameters` builder. + +use crate::models::{DropTokensMode, MultiSearchParameters}; +use bon::builder; + +/// Creates a new [`MultiSearchParameters`] builder. +/// +/// This builder helps construct a set of common search parameters that can be applied +/// to all search queries within a multi-search request. +#[builder( + // expose a public builder type and a public finish_fn + builder_type(name = MultiSearchParametersBuilder, vis = "pub"), + finish_fn(name = build, vis = "pub"), + // allow passing &str into String params + on(String, into) +)] +pub fn new_multi_search_parameters( + /// The query text to search for in the collection. + q: Option, + /// A list of `string` fields that should be queried against. + query_by: Option, + /// The relative weight to give each `query_by` field when ranking results. + query_by_weights: Option, + /// How the representative text match score is calculated. + text_match_type: Option, + /// Indicates if the last word in the query should be treated as a prefix. + prefix: Option, + /// Infix search configuration. Can be `off`, `always`, or `fallback`. + infix: Option, + /// Maximum number of extra symbols before a query token for infix searching. + max_extra_prefix: Option, + /// Maximum number of extra symbols after a query token for infix searching. + max_extra_suffix: Option, + /// Filter conditions for refining search results. + filter_by: Option, + /// A list of fields and their sort orders. + sort_by: Option, + /// A list of fields to facet by. + facet_by: Option, + /// Maximum number of facet values to be returned. + max_facet_values: Option, + /// A query to filter facet values. + facet_query: Option, + /// The number of typographical errors (1 or 2) that would be tolerated. + num_typos: Option, + /// The page number to fetch. + page: Option, + /// Number of results to fetch per page. + per_page: Option, + /// Number of hits to fetch. + limit: Option, + /// The starting point of the result set. + offset: Option, + /// Fields to group results by. + group_by: Option, + /// Maximum number of hits to return for every group. + group_limit: Option, + /// Whether to group documents with null values in the `group_by` field. + group_missing_values: Option, + /// List of fields to include in the search result. + include_fields: Option, + /// List of fields to exclude from the search result. + exclude_fields: Option, + /// List of fields which should be highlighted fully. + highlight_full_fields: Option, + /// The number of tokens surrounding the highlighted text. + highlight_affix_num_tokens: Option, + /// The start tag for highlighted snippets. + highlight_start_tag: Option, + /// The end tag for highlighted snippets. + highlight_end_tag: Option, + /// Field values under this length will be fully highlighted. + snippet_threshold: Option, + /// Threshold for dropping query tokens to find more results. + drop_tokens_threshold: Option, + drop_tokens_mode: Option, + /// Threshold for trying more typos to find more results. + typo_tokens_threshold: Option, + /// Whether to enable typos on alphanumerical tokens. + enable_typos_for_alpha_numerical_tokens: Option, + /// Whether the `filter_by` condition applies to curated results. 
+ filter_curated_hits: Option, + /// Whether to enable synonyms for the query. + enable_synonyms: Option, + /// Allow synonym resolution on word prefixes. + synonym_prefix: Option, + /// Number of typos allowed for synonym resolution. + synonym_num_typos: Option, + /// A list of records to unconditionally include at specific positions. + pinned_hits: Option, + /// A list of records to unconditionally hide from search results. + hidden_hits: Option, + /// Comma-separated list of tags to trigger curation rules. + override_tags: Option, + /// A list of custom fields that must be highlighted. + highlight_fields: Option, + /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same + pre_segmented_query: Option, + /// Search using a preset of search parameters. + preset: Option, + /// Whether to enable overrides for the query. + enable_overrides: Option, + /// Whether to prioritize an exact match. + prioritize_exact_match: Option, + /// Prioritize documents where query words appear earlier in the text. + prioritize_token_position: Option, + /// Prioritize documents where query words appear in more fields. + prioritize_num_matching_fields: Option, + /// Disable typos for numerical tokens. + enable_typos_for_numerical_tokens: Option, + /// Whether to perform an exhaustive search. + exhaustive_search: Option, + /// Search cutoff time in milliseconds. + search_cutoff_ms: Option, + /// Enable server-side caching of search results. + use_cache: Option, + /// The TTL for the search query cache. + cache_ttl: Option, + /// Minimum word length for 1-typo correction. + min_len_1typo: Option, + /// Minimum word length for 2-typo correction. + min_len_2typo: Option, + /// Vector query expression. + vector_query: Option, + /// Timeout for fetching remote embeddings. + remote_embedding_timeout_ms: Option, + /// Number of retries for fetching remote embeddings. + remote_embedding_num_tries: Option, + /// The underlying faceting strategy to use. + facet_strategy: Option, + /// Name of the stopwords set to apply for this search. + stopwords: Option, + /// Nested facet fields whose parent object should be returned. + facet_return_parent: Option, + /// The base64 encoded audio file. + voice_query: Option, + /// Enable conversational search. + conversation: Option, + /// The ID of the Conversation Model to use. + conversation_model_id: Option, + /// The ID of a previous conversation to continue. 
+ conversation_id: Option, +) -> MultiSearchParameters { + MultiSearchParameters { + q, + query_by, + query_by_weights, + text_match_type, + prefix, + infix, + max_extra_prefix, + max_extra_suffix, + filter_by, + sort_by, + facet_by, + max_facet_values, + facet_query, + num_typos, + page, + per_page, + limit, + offset, + group_by, + group_limit, + group_missing_values, + include_fields, + exclude_fields, + highlight_full_fields, + highlight_affix_num_tokens, + highlight_start_tag, + highlight_end_tag, + snippet_threshold, + drop_tokens_threshold, + drop_tokens_mode, + typo_tokens_threshold, + enable_typos_for_alpha_numerical_tokens, + filter_curated_hits, + enable_synonyms, + synonym_prefix, + synonym_num_typos, + pinned_hits, + hidden_hits, + override_tags, + highlight_fields, + pre_segmented_query, + preset, + enable_overrides, + prioritize_exact_match, + prioritize_token_position, + prioritize_num_matching_fields, + enable_typos_for_numerical_tokens, + exhaustive_search, + search_cutoff_ms, + use_cache, + cache_ttl, + min_len_1typo, + min_len_2typo, + vector_query, + remote_embedding_timeout_ms, + remote_embedding_num_tries, + facet_strategy, + stopwords, + facet_return_parent, + voice_query, + conversation, + conversation_model_id, + conversation_id, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::models::{DropTokensMode, MultiSearchParameters}; + + #[test] + fn test_multi_search_parameters_builder_basic() { + let built = new_multi_search_parameters() + .query_by("title") + .per_page(5) + .build(); + + let expected = MultiSearchParameters { + query_by: Some("title".to_string()), + per_page: Some(5), + ..Default::default() + }; + + assert_eq!(built, expected); + } + + #[test] + fn test_multi_search_parameters_builder_full() { + let built = new_multi_search_parameters() + .q("*") + .filter_by("category:shoes") + .use_cache(true) + .drop_tokens_mode(DropTokensMode::LeftToRight) + .search_cutoff_ms(100) + .build(); + + let expected = MultiSearchParameters { + q: Some("*".to_string()), + filter_by: Some("category:shoes".to_string()), + use_cache: Some(true), + drop_tokens_mode: Some(DropTokensMode::LeftToRight), + search_cutoff_ms: Some(100), + ..Default::default() + }; + + assert_eq!(built, expected); + } + + #[test] + fn test_multi_search_parameters_builder_defaults() { + let built = new_multi_search_parameters().build(); + let expected = MultiSearchParameters::default(); + assert_eq!(built, expected); + } +} diff --git a/typesense/src/builders/multi_search_searches_parameters.rs b/typesense/src/builders/multi_search_searches_parameters.rs new file mode 100644 index 0000000..60a3490 --- /dev/null +++ b/typesense/src/builders/multi_search_searches_parameters.rs @@ -0,0 +1,127 @@ +//! Module for the `MultiSearchSearchesParameter` builder. + +use crate::models::{MultiSearchCollectionParameters, MultiSearchSearchesParameter}; + +/// A builder for creating a `MultiSearchSearchesParameter` object. +/// +/// This builder is used to construct the body of a multi-search request by +/// adding individual search queries one by one. +#[derive(Debug, Default)] +pub struct MultiSearchSearchesParameterBuilder { + searches: Vec, +} + +impl MultiSearchSearchesParameterBuilder { + /// Creates a new, empty builder. + pub fn new() -> Self { + Self::default() + } + + /// Adds a single search query to the multi-search request. + /// + /// # Arguments + /// + /// * `search` - A `MultiSearchCollectionParameters` object representing an + /// individual search query. 
+ pub fn add_search(mut self, search: MultiSearchCollectionParameters) -> Self { + self.searches.push(search); + self + } + + /// Adds multiple search queries to the multi-search request from an iterator. + /// + /// # Arguments + /// + /// * `searches` - An iterator that yields `MultiSearchCollectionParameters` objects. + pub fn add_searches( + mut self, + searches: impl IntoIterator, + ) -> Self { + self.searches.extend(searches); + self + } + + /// Consumes the builder and returns a `MultiSearchSearchesParameter` object. + pub fn build(self) -> MultiSearchSearchesParameter { + MultiSearchSearchesParameter { + searches: self.searches, + } + } +} + +/// Creates a new [`MultiSearchSearchesParameterBuilder`]. +/// +/// This is the entry point for building a multi-search request body. +pub fn new_multi_search_searches_parameter() -> MultiSearchSearchesParameterBuilder { + MultiSearchSearchesParameterBuilder::new() +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::builders::new_multi_search_collection_parameters; + + #[test] + fn test_multi_search_builder_new_is_empty() { + let multi_search_request = new_multi_search_searches_parameter().build(); + assert!(multi_search_request.searches.is_empty()); + } + + #[test] + fn test_multi_search_builder_add_one_search() { + let search1 = new_multi_search_collection_parameters() + .collection("products") + .q("shoe") + .build(); + + let multi_search_request = new_multi_search_searches_parameter() + .add_search(search1.clone()) + .build(); + + assert_eq!(multi_search_request.searches.len(), 1); + assert_eq!(multi_search_request.searches[0], search1); + } + + #[test] + fn test_multi_search_builder_add_multiple_searches_chained() { + let search1 = new_multi_search_collection_parameters() + .collection("products") + .q("shoe") + .build(); + let search2 = new_multi_search_collection_parameters() + .collection("brands") + .q("Nike") + .build(); + + let multi_search_request = new_multi_search_searches_parameter() + .add_search(search1.clone()) + .add_search(search2.clone()) + .build(); + + assert_eq!(multi_search_request.searches.len(), 2); + assert_eq!(multi_search_request.searches[0], search1); + assert_eq!(multi_search_request.searches[1], search2); + } + + #[test] + fn test_multi_search_builder_add_searches_from_iterator() { + let searches_vec = vec![ + new_multi_search_collection_parameters() + .collection("c1") + .build(), + new_multi_search_collection_parameters() + .collection("c2") + .build(), + new_multi_search_collection_parameters() + .collection("c3") + .build(), + ]; + + let multi_search_request = new_multi_search_searches_parameter() + .add_searches(searches_vec.clone()) + .build(); + + assert_eq!(multi_search_request.searches.len(), 3); + assert_eq!(multi_search_request.searches, searches_vec); + } +} diff --git a/typesense/src/builders/search_parameters.rs b/typesense/src/builders/search_parameters.rs new file mode 100644 index 0000000..18efae6 --- /dev/null +++ b/typesense/src/builders/search_parameters.rs @@ -0,0 +1,280 @@ +//! Module for the `SearchParameters` builder. + +use crate::models::{DropTokensMode, SearchParameters}; +use bon::builder; + +/// Creates a new [`SearchParameters`] builder. +/// +/// This builder provides a convenient way to construct a `SearchParameters` object +/// for a Typesense search query. All parameters are optional. 
+#[builder( + // expose a public builder type named `SearchParametersBuilder` and a public finish_fn `build()` + builder_type(name = SearchParametersBuilder, vis = "pub"), + finish_fn(name = build, vis = "pub"), + // allow passing &str into String params + on(String, into) +)] +pub fn new_search_parameters( + /// The query text to search for in the collection. Use * as the search string to return all documents. This is typically useful when used in conjunction with filter_by. + q: Option, + /// A list of `string` fields that should be queried against. Multiple fields are separated with a comma. + query_by: Option, + /// Whether to use natural language processing to parse the query. + nl_query: Option, + /// The ID of the natural language model to use. + nl_model_id: Option, + /// The relative weight to give each `query_by` field when ranking results. This can be used to boost fields in priority, when looking for matches. Multiple fields are separated with a comma. + query_by_weights: Option, + /// In a multi-field matching context, this parameter determines how the representative text match score of a record is calculated. Possible values are max_score (default) or max_weight. + text_match_type: Option, + /// Boolean field to indicate that the last word in the query should be treated as a prefix, and not as a whole word. This is used for building autocomplete and instant search interfaces. Defaults to true. + prefix: Option, + /// If infix index is enabled for this field, infix searching can be done on a per-field basis by sending a comma separated string parameter called infix to the search query. This parameter can have 3 values; `off` infix search is disabled, which is default `always` infix search is performed along with regular search `fallback` infix search is performed if regular search does not produce results + infix: Option, + /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. + max_extra_prefix: Option, + /// There are also 2 parameters that allow you to control the extent of infix searching max_extra_prefix and max_extra_suffix which specify the maximum number of symbols before or after the query that can be present in the token. For example query \"K2100\" has 2 extra symbols in \"6PK2100\". By default, any number of prefixes/suffixes can be present for a match. + max_extra_suffix: Option, + /// Filter conditions for refining youropen api validator search results. Separate multiple conditions with &&. + filter_by: Option, + /// Controls the number of similar words that Typesense considers during fuzzy search on filter_by values. Useful for controlling prefix matches like company_name:Acm*. + max_filter_by_candidates: Option, + /// A list of numerical fields and their corresponding sort orders that will be used for ordering your results. Up to 3 sort fields can be specified. The text similarity score is exposed as a special `_text_match` field that you can use in the list of sorting fields. If no `sort_by` parameter is specified, results are sorted by `_text_match:desc,default_sorting_field:desc` + sort_by: Option, + /// A list of fields that will be used for faceting your results on. Separate multiple fields with a comma. 
+ facet_by: Option, + /// Maximum number of facet values to be returned. + max_facet_values: Option, + /// Facet values that are returned can now be filtered via this parameter. The matching facet text is also highlighted. For example, when faceting by `category`, you can set `facet_query=category:shoe` to return only facet values that contain the prefix \"shoe\". + facet_query: Option, + /// The number of typographical errors (1 or 2) that would be tolerated. Default: 2 + num_typos: Option, + /// Results from this specific page number would be fetched. + page: Option, + /// Number of results to fetch per page. Default: 10 + per_page: Option, + /// Number of hits to fetch. Can be used as an alternative to the per_page parameter. Default: 10. + limit: Option, + /// Identifies the starting point to return hits from a result set. Can be used as an alternative to the page parameter. + offset: Option, + /// You can aggregate search results into groups or buckets by specify one or more `group_by` fields. Separate multiple fields with a comma. To group on a particular field, it must be a faceted field. + group_by: Option, + /// Maximum number of hits to be returned for every group. If the `group_limit` is set as `K` then only the top K hits in each group are returned in the response. Default: 3 + group_limit: Option, + /// Setting this parameter to true will place all documents that have a null value in the group_by field, into a single group. Setting this parameter to false, will cause each document with a null value in the group_by field to not be grouped with other documents. Default: true + group_missing_values: Option, + /// List of fields from the document to include in the search result + include_fields: Option, + /// List of fields from the document to exclude in the search result + exclude_fields: Option, + /// List of fields which should be highlighted fully without snippeting + highlight_full_fields: Option, + /// The number of tokens that should surround the highlighted text on each side. Default: 4 + highlight_affix_num_tokens: Option, + /// The start tag used for the highlighted snippets. Default: `` + highlight_start_tag: Option, + /// The end tag used for the highlighted snippets. Default: `` + highlight_end_tag: Option, + /// Flag for enabling/disabling the deprecated, old highlight structure in the response. Default: true + enable_highlight_v1: Option, + /// Field values under this length will be fully highlighted, instead of showing a snippet of relevant portion. Default: 30 + snippet_threshold: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to drop the tokens in the query until enough results are found. Tokens that have the least individual hits are dropped first. Set to 0 to disable. Default: 10 + drop_tokens_threshold: Option, + drop_tokens_mode: Option, + /// If the number of results found for a specific query is less than this number, Typesense will attempt to look for tokens with more typos until enough results are found. Default: 100 + typo_tokens_threshold: Option, + /// Set this parameter to false to disable typos on alphanumerical query tokens. Default: true. + enable_typos_for_alpha_numerical_tokens: Option, + /// Whether the filter_by condition of the search query should be applicable to curated results (override definitions, pinned hits, hidden hits, etc.). 
Default: false + filter_curated_hits: Option, + /// If you have some synonyms defined but want to disable all of them for a particular search query, set enable_synonyms to false. Default: true + enable_synonyms: Option, + /// Allow synonym resolution on word prefixes in the query. Default: false + synonym_prefix: Option, + /// Allow synonym resolution on typo-corrected words in the query. Default: 0 + synonym_num_typos: Option, + /// A list of records to unconditionally include in the search results at specific positions. An example use case would be to feature or promote certain items on the top of search results. A list of `record_id:hit_position`. Eg: to include a record with ID 123 at Position 1 and another record with ID 456 at Position 5, you'd specify `123:1,456:5`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + pinned_hits: Option, + /// A list of records to unconditionally hide from search results. A list of `record_id`s to hide. Eg: to hide records with IDs 123 and 456, you'd specify `123,456`. You could also use the Overrides feature to override search results based on rules. Overrides are applied first, followed by `pinned_hits` and finally `hidden_hits`. + hidden_hits: Option, + /// Comma separated list of tags to trigger the curations rules that match the tags. + override_tags: Option, + /// A list of custom fields that must be highlighted even if you don't query for them + highlight_fields: Option, + /// Treat space as typo: search for q=basket ball if q=basketball is not found or vice-versa. Splitting/joining of tokens will only be attempted if the original query produces no results. To always trigger this behavior, set value to `always``. To disable, set value to `off`. Default is `fallback`. + split_join_tokens: Option, + /// You can index content from any logographic language into Typesense if you are able to segment / split the text into space-separated words yourself before indexing and querying. Set this parameter to true to do the same + pre_segmented_query: Option, + /// Search using a bunch of search parameters by setting this parameter to the name of the existing Preset. + preset: Option, + /// If you have some overrides defined but want to disable all of them during query time, you can do that by setting this parameter to false + enable_overrides: Option, + /// Set this parameter to true to ensure that an exact match is ranked above the others + prioritize_exact_match: Option, + /// Control the number of words that Typesense considers for typo and prefix searching. + max_candidates: Option, + /// Make Typesense prioritize documents where the query words appear earlier in the text. + prioritize_token_position: Option, + /// Make Typesense prioritize documents where the query words appear in more number of fields. + prioritize_num_matching_fields: Option, + /// Make Typesense disable typos for numerical tokens. + enable_typos_for_numerical_tokens: Option, + /// Setting this to true will make Typesense consider all prefixes and typo corrections of the words in the query without stopping early when enough results are found (drop_tokens_threshold and typo_tokens_threshold configurations are ignored). + exhaustive_search: Option, + /// Typesense will attempt to return results early if the cutoff time has elapsed. This is not a strict guarantee and facet computation is not bound by this parameter. 
+ search_cutoff_ms: Option, + /// Enable server side caching of search query results. By default, caching is disabled. + use_cache: Option, + /// The duration (in seconds) that determines how long the search query is cached. This value can be set on a per-query basis. Default: 60. + cache_ttl: Option, + /// Minimum word length for 1-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + min_len_1typo: Option, + /// Minimum word length for 2-typo correction to be applied. The value of num_typos is still treated as the maximum allowed typos. + min_len_2typo: Option, + /// Vector query expression for fetching documents \"closest\" to a given query/document vector. + vector_query: Option, + /// Timeout (in milliseconds) for fetching remote embeddings. + remote_embedding_timeout_ms: Option, + /// Number of times to retry fetching remote embeddings. + remote_embedding_num_tries: Option, + /// Choose the underlying faceting strategy used. Comma separated string of allows values: exhaustive, top_values or automatic (default). + facet_strategy: Option, + /// Name of the stopwords set to apply for this search, the keywords present in the set will be removed from the search query. + stopwords: Option, + /// Comma separated string of nested facet fields whose parent object should be returned in facet response. + facet_return_parent: Option, + /// The base64 encoded audio file in 16 khz 16-bit WAV format. + voice_query: Option, + /// Enable conversational search. + conversation: Option, + /// The Id of Conversation Model to be used. + conversation_model_id: Option, + /// The Id of a previous conversation to continue, this tells Typesense to include prior context when communicating with the LLM. + conversation_id: Option, +) -> SearchParameters { + SearchParameters { + q, + query_by, + nl_query, + nl_model_id, + query_by_weights, + text_match_type, + prefix, + infix, + max_extra_prefix, + max_extra_suffix, + filter_by, + max_filter_by_candidates, + sort_by, + facet_by, + max_facet_values, + facet_query, + num_typos, + page, + per_page, + limit, + offset, + group_by, + group_limit, + group_missing_values, + include_fields, + exclude_fields, + highlight_full_fields, + highlight_affix_num_tokens, + highlight_start_tag, + highlight_end_tag, + enable_highlight_v1, + snippet_threshold, + drop_tokens_threshold, + drop_tokens_mode, + typo_tokens_threshold, + enable_typos_for_alpha_numerical_tokens, + filter_curated_hits, + enable_synonyms, + synonym_prefix, + synonym_num_typos, + pinned_hits, + hidden_hits, + override_tags, + highlight_fields, + split_join_tokens, + pre_segmented_query, + preset, + enable_overrides, + prioritize_exact_match, + max_candidates, + prioritize_token_position, + prioritize_num_matching_fields, + enable_typos_for_numerical_tokens, + exhaustive_search, + search_cutoff_ms, + use_cache, + cache_ttl, + min_len_1typo, + min_len_2typo, + vector_query, + remote_embedding_timeout_ms, + remote_embedding_num_tries, + facet_strategy, + stopwords, + facet_return_parent, + voice_query, + conversation, + conversation_model_id, + conversation_id, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::models::{DropTokensMode, SearchParameters}; + + #[test] + fn test_search_parameters_builder_basic() { + let built = new_search_parameters() + .q("a test query") + .query_by("title,description") + .per_page(15) + .build(); + + let expected = SearchParameters { + q: Some("a test query".to_string()), + query_by: 
Some("title,description".to_string()), + per_page: Some(15), + ..Default::default() + }; + + assert_eq!(built, expected); + } + + #[test] + fn test_search_parameters_builder_full() { + let built = new_search_parameters() + .q("*") + .filter_by("stock > 0") + .use_cache(true) + .drop_tokens_mode(DropTokensMode::LeftToRight) + .search_cutoff_ms(50) + .build(); + + let expected = SearchParameters { + q: Some("*".to_string()), + filter_by: Some("stock > 0".to_string()), + use_cache: Some(true), + drop_tokens_mode: Some(DropTokensMode::LeftToRight), + search_cutoff_ms: Some(50), + ..Default::default() + }; + + assert_eq!(built, expected); + } + + #[test] + fn test_search_parameters_builder_defaults() { + let built = new_search_parameters().build(); + let expected = SearchParameters::default(); + assert_eq!(built, expected); + } +} diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index 5c5aa22..3b9c56b 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -139,8 +139,8 @@ use keys::Keys; use operations::Operations; use preset::Preset; use presets::Presets; -use serde::de::DeserializeOwned; use serde::Serialize; +use serde::de::DeserializeOwned; use stemming::Stemming; use stopword::Stopword; use stopwords::Stopwords; @@ -149,14 +149,14 @@ use crate::Error; use reqwest::Url; #[cfg(not(target_arch = "wasm32"))] use reqwest_middleware::ClientBuilder as ReqwestMiddlewareClientBuilder; -use reqwest_retry::policies::ExponentialBackoff; #[cfg(not(target_arch = "wasm32"))] use reqwest_retry::RetryTransientMiddleware; +use reqwest_retry::policies::ExponentialBackoff; use std::future::Future; use std::sync::{ - atomic::{AtomicUsize, Ordering}, Arc, Mutex, + atomic::{AtomicUsize, Ordering}, }; use typesense_codegen::apis::{self, configuration}; use web_time::{Duration, Instant}; @@ -171,28 +171,6 @@ struct Node { is_healthy: bool, last_access_timestamp: Instant, } - -// impl Default for MultiNodeConfiguration { -// /// Provides a default configuration suitable for local development. -// /// -// /// - **nodes**: Empty. -// /// - **nearest_node**: None. -// /// - **api_key**: "xyz" (a common placeholder). -// /// - **healthcheck_interval**: 60 seconds. -// /// - **retry_policy**: Exponential backoff with a maximum of 3 retries. -// /// - **connection_timeout**: 5 seconds. -// fn default() -> Self { -// Self { -// nodes: vec![], -// nearest_node: None, -// api_key: "xyz".to_string(), -// healthcheck_interval: Duration::from_secs(60), -// retry_policy: ExponentialBackoff::builder().build_with_max_retries(3), -// connection_timeout: Duration::from_secs(5), -// } -// } -// } - /// The main entry point for all interactions with the Typesense API. /// /// The client manages connections to multiple nodes and provides access to different @@ -216,7 +194,11 @@ pub struct Client { impl Client { /// Creates a new `Client` with the given configuration. /// - /// Returns an error if the configuration contains no nodes. + /// Returns an error if the configuration contains no nodes. Default values: + /// - **nearest_node**: None. + /// - **healthcheck_interval**: 60 seconds. + /// - **retry_policy**: Exponential backoff with a maximum of 3 retries. (disabled on WASM) + /// - **connection_timeout**: 5 seconds. (disabled on WASM) #[builder] pub fn new( /// The Typesense API key used for authentication. @@ -232,7 +214,7 @@ impl Client { #[builder(default = ExponentialBackoff::builder().build_with_max_retries(3))] /// The retry policy for transient network errors on a *single* node. 
retry_policy: ExponentialBackoff, - #[builder(default = Duration::from_secs(10))] + #[builder(default = Duration::from_secs(5))] /// The timeout for each individual network request. connection_timeout: Duration, ) -> Result { @@ -354,9 +336,7 @@ impl Client { .build() .expect("Failed to build reqwest client"), ) - .with(RetryTransientMiddleware::new_with_policy( - self.retry_policy, - )) + .with(RetryTransientMiddleware::new_with_policy(self.retry_policy)) .build(); // Create a temporary, single-node config for the generated API function. From 51d8402c317ba5567078d0e7cd062fcb6cdffe1b Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Fri, 22 Aug 2025 20:17:08 +0700 Subject: [PATCH 20/21] feat: operations API --- typesense/src/client/operations.rs | 110 +++++++++- typesense/src/models/mod.rs | 1 + .../tests/client/derive_integration_test.rs | 6 +- typesense/tests/client/documents_test.rs | 17 +- typesense/tests/client/mod.rs | 1 + typesense/tests/client/operations_test.rs | 192 ++++++++++++++++++ typesense/tests/derive/lib.rs | 4 +- 7 files changed, 308 insertions(+), 23 deletions(-) create mode 100644 typesense/tests/client/operations_test.rs diff --git a/typesense/src/client/operations.rs b/typesense/src/client/operations.rs index 60a7721..d784fc9 100644 --- a/typesense/src/client/operations.rs +++ b/typesense/src/client/operations.rs @@ -5,11 +5,7 @@ use crate::{Client, Error}; use std::sync::Arc; use typesense_codegen::{ - apis::{ - configuration, - debug_api, - health_api, // Add this line - }, + apis::{configuration, debug_api, health_api, operations_api}, models, }; @@ -38,10 +34,8 @@ impl<'a> Operations<'a> { .await } - /// Checks if a Typesense node is healthy and ready to accept requests. - /// - /// This method will try nodes in sequence according to the health policy - /// until it gets a successful response (`{"ok": true}`). + /// Get health information about a Typesense node. + /// When a node is running out of memory / disk, the API response will have an additional resource_error field that's set to either `OUT_OF_DISK`` or `OUT_OF_MEMORY``. pub async fn health(&self) -> Result> { self.client .execute(|config: Arc| async move { @@ -49,4 +43,102 @@ impl<'a> Operations<'a> { }) .await } + + /// Get current RAM, CPU, Disk & Network usage metrics. 
+ /// ### Example JSON response: + /// ```json + /// { + /// "system_cpu1_active_percentage": "0.00", + /// "system_cpu2_active_percentage": "0.00", + /// "system_cpu3_active_percentage": "0.00", + /// "system_cpu4_active_percentage": "0.00", + /// "system_cpu_active_percentage": "0.00", + /// "system_disk_total_bytes": "1043447808", + /// "system_disk_used_bytes": "561152", + /// "system_memory_total_bytes": "2086899712", + /// "system_memory_used_bytes": "1004507136", + /// "system_memory_total_swap_bytes": "1004507136", + /// "system_memory_used_swap_bytes": "0.00", + /// "system_network_received_bytes": "1466", + /// "system_network_sent_bytes": "182", + /// "typesense_memory_active_bytes": "29630464", + /// "typesense_memory_allocated_bytes": "27886840", + /// "typesense_memory_fragmentation_ratio": "0.06", + /// "typesense_memory_mapped_bytes": "69701632", + /// "typesense_memory_metadata_bytes": "4588768", + /// "typesense_memory_resident_bytes": "29630464", + /// "typesense_memory_retained_bytes": "25718784" + /// } + /// ``` + pub async fn retrieve_metrics( + &self, + ) -> Result> { + self.client + .execute(|config: Arc| async move { + operations_api::retrieve_metrics(&config).await + }) + .await + } + + /// Get stats about API endpoints. + /// This endpoint returns average requests per second and latencies for all requests in the last 10 seconds. + /// Example JSON response: + /// ```json + /// { + /// "latency_ms": { + /// "GET /collections/products": 0.0, + /// "POST /collections": 4.0, + /// "POST /collections/products/documents/import": 1166.0 + /// }, + /// "requests_per_second": { + /// "GET /collections/products": 0.1, + /// "POST /collections": 0.1, + /// "POST /collections/products/documents/import": 0.1 + /// } + /// } + /// ``` + pub async fn retrieve_api_stats( + &self, + ) -> Result> { + self.client + .execute(|config: Arc| async move { + operations_api::retrieve_api_stats(&config).await + }) + .await + } + + /// Creates a point-in-time snapshot of a Typesense node's state and data in the specified directory. + /// You can then backup the snapshot directory that gets created and later restore it as a data directory, as needed. + pub async fn take_snapshot( + &self, + params: operations_api::TakeSnapshotParams, + ) -> Result> { + self.client + .execute(|config: Arc| { + let params_for_move = params.clone(); + async move { operations_api::take_snapshot(&config, params_for_move).await } + }) + .await + } + + /// Triggers a follower node to initiate the raft voting process, which triggers leader re-election. + /// The follower node that you run this operation against will become the new leader, once this command succeeds. + pub async fn vote(&self) -> Result> { + self.client + .execute(|config: Arc| async move { + operations_api::vote(&config).await + }) + .await + } + + /// You can check the status of in-progress schema change operations by using the schema changes endpoint. 
+ pub async fn get_schema_changes( + &self, + ) -> Result, Error> { + self.client + .execute(|config: Arc| async move { + operations_api::get_schema_changes(&config).await + }) + .await + } } diff --git a/typesense/src/models/mod.rs b/typesense/src/models/mod.rs index d4c7f26..74aa28b 100644 --- a/typesense/src/models/mod.rs +++ b/typesense/src/models/mod.rs @@ -6,6 +6,7 @@ pub use multi_search::*; pub use scoped_key_parameters::*; pub use search_result::*; +pub use typesense_codegen::apis::operations_api::TakeSnapshotParams; pub use typesense_codegen::models::{ AnalyticsEventCreateResponse, AnalyticsEventCreateSchema, AnalyticsRuleDeleteResponse, AnalyticsRuleParameters, AnalyticsRuleParametersDestination, AnalyticsRuleParametersSource, diff --git a/typesense/tests/client/derive_integration_test.rs b/typesense/tests/client/derive_integration_test.rs index caf9de5..5f174f1 100644 --- a/typesense/tests/client/derive_integration_test.rs +++ b/typesense/tests/client/derive_integration_test.rs @@ -1,8 +1,8 @@ use serde::{Deserialize, Serialize}; -use typesense::models::SearchParameters; -use typesense::prelude::*; use typesense::Field; use typesense::Typesense; +use typesense::models::SearchParameters; +use typesense::prelude::*; use crate::{get_client, new_id}; @@ -31,7 +31,7 @@ struct Manufacturer { city: String, } -/// The main "uber" struct that uses every feature of the derive macro. +/// The main struct that uses every feature of the derive macro. #[derive(Typesense, Serialize, Deserialize, Debug, PartialEq, Clone)] #[typesense( collection_name = "mega_products", diff --git a/typesense/tests/client/documents_test.rs b/typesense/tests/client/documents_test.rs index c0a5e87..5b6d8d6 100644 --- a/typesense/tests/client/documents_test.rs +++ b/typesense/tests/client/documents_test.rs @@ -1,9 +1,12 @@ use serde::{Deserialize, Serialize}; use serde_json::json; -use typesense::models::{ - CollectionSchema, DeleteDocumentsParameters, DirtyValues, DocumentIndexParameters, - ExportDocumentsParameters, Field, ImportDocumentsParameters, IndexAction, SearchParameters, - UpdateDocumentsParameters, +use typesense::{ + models::{ + CollectionSchema, DeleteDocumentsParameters, DirtyValues, DocumentIndexParameters, + ExportDocumentsParameters, Field, ImportDocumentsParameters, IndexAction, SearchParameters, + UpdateDocumentsParameters, + }, + new_search_parameters, }; use super::{get_client, new_id}; @@ -76,11 +79,7 @@ async fn run_test_document_lifecycle() { assert_eq!(retrieve_res.unwrap(), book_1); // --- 5. 
Search for documents --- - let search_params = SearchParameters { - q: Some("the".to_string()), - query_by: Some("title".to_string()), - ..Default::default() - }; + let search_params = new_search_parameters().q("the").query_by("title").build(); let search_res = documents_client.search(search_params).await; assert!(search_res.is_ok(), "Search failed"); assert_eq!(search_res.unwrap().found, Some(2)); diff --git a/typesense/tests/client/mod.rs b/typesense/tests/client/mod.rs index c6698d7..8b2f2f9 100644 --- a/typesense/tests/client/mod.rs +++ b/typesense/tests/client/mod.rs @@ -7,6 +7,7 @@ mod derive_integration_test; mod documents_test; mod keys_test; mod multi_search_test; +mod operations_test; mod presets_test; mod search_overrides_test; mod stemming_dictionaries_test; diff --git a/typesense/tests/client/operations_test.rs b/typesense/tests/client/operations_test.rs new file mode 100644 index 0000000..6303b08 --- /dev/null +++ b/typesense/tests/client/operations_test.rs @@ -0,0 +1,192 @@ +use super::get_client; +use typesense::models::TakeSnapshotParams; + +async fn run_test_health_check() { + let client = get_client(); + + let health_result = client.operations().health().await; + assert!(health_result.is_ok(), "Failed to get health status"); + let health_status = health_result.unwrap(); + assert!( + matches!(health_status.ok, true | false), + "The 'ok' field should be a boolean." + ); +} + +async fn run_test_debug_info() { + let client = get_client(); + + let debug_result = client.operations().debug().await; + assert!(debug_result.is_ok(), "Failed to get debug information"); + let debug_info = debug_result.unwrap(); + + assert!( + debug_info.version.is_some(), + "Debug info should contain a version" + ); +} + +async fn run_test_retrieve_metrics() { + let client = get_client(); + let metrics_result = client.operations().retrieve_metrics().await; + + assert!(metrics_result.is_ok(), "Failed to retrieve metrics"); + let metrics = metrics_result.unwrap(); + + assert!(metrics.is_object(), "Metrics should be a JSON object"); + assert!( + metrics.get("system_memory_used_bytes").is_some(), + "Expected system_memory_used_bytes in metrics" + ); + assert!( + metrics.get("typesense_memory_active_bytes").is_some(), + "Expected typesense_memory_active_bytes in metrics" + ); +} + +async fn run_test_retrieve_api_stats() { + let client = get_client(); + let stats_result = client.operations().retrieve_api_stats().await; + + assert!(stats_result.is_ok(), "Failed to retrieve API stats"); + let stats = stats_result.unwrap(); + // The maps might be empty if there are no recent requests, + // so we just check that the call succeeds and returns the correct structure. + assert!( + stats.latency_ms.is_some(), + "Expected latency_ms field in API stats" + ); + assert!( + stats.requests_per_second.is_some(), + "Expected requests_per_second field in API stats" + ); +} + +async fn run_test_take_snapshot() { + let client = get_client(); + // Note: This requires a directory that Typesense can write to. + // In a typical Docker setup, `/tmp` is a safe choice. 
+ let params = TakeSnapshotParams { + snapshot_path: "/tmp/typesense-snapshots-rust-test".to_string(), + }; + let snapshot_result = client.operations().take_snapshot(params).await; + + assert!(snapshot_result.is_ok(), "Failed to take snapshot"); + assert!( + snapshot_result.unwrap().success, + "Snapshot operation should be successful" + ); +} + +async fn run_test_vote() { + let client = get_client(); + let vote_result = client.operations().vote().await; + + assert!( + matches!(vote_result.unwrap().success, true | false), + "The 'success' field should be a boolean." + ); +} + +async fn run_test_get_schema_changes() { + let client = get_client(); + let schema_changes_result = client.operations().get_schema_changes().await; + + assert!( + schema_changes_result.is_ok(), + "Failed to get schema changes" + ); + // The result is a Vec, which is sufficient to confirm the endpoint call was successful. + // The vec can be empty if no schema changes are in progress. + let _schema_changes = schema_changes_result.unwrap(); +} + +#[cfg(not(target_arch = "wasm32"))] +mod tokio_test { + use super::*; + + #[tokio::test] + async fn test_health_check() { + run_test_health_check().await; + } + + #[tokio::test] + async fn test_debug_info() { + run_test_debug_info().await; + } + + #[tokio::test] + async fn test_retrieve_metrics() { + run_test_retrieve_metrics().await; + } + + #[tokio::test] + async fn test_retrieve_api_stats() { + run_test_retrieve_api_stats().await; + } + + #[tokio::test] + async fn test_take_snapshot() { + run_test_take_snapshot().await; + } + + #[tokio::test] + async fn test_vote() { + run_test_vote().await; + } + + #[tokio::test] + async fn test_get_schema_changes() { + run_test_get_schema_changes().await; + } +} + +#[cfg(target_arch = "wasm32")] +mod wasm_test { + use super::*; + use wasm_bindgen_test::wasm_bindgen_test; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + #[wasm_bindgen_test] + async fn test_health_check() { + console_error_panic_hook::set_once(); + run_test_health_check().await; + } + + #[wasm_bindgen_test] + async fn test_debug_info() { + console_error_panic_hook::set_once(); + run_test_debug_info().await; + } + + #[wasm_bindgen_test] + async fn test_retrieve_metrics() { + console_error_panic_hook::set_once(); + run_test_retrieve_metrics().await; + } + + #[wasm_bindgen_test] + async fn test_retrieve_api_stats() { + console_error_panic_hook::set_once(); + run_test_retrieve_api_stats().await; + } + + #[wasm_bindgen_test] + async fn test_take_snapshot() { + console_error_panic_hook::set_once(); + run_test_take_snapshot().await; + } + + #[wasm_bindgen_test] + async fn test_vote() { + console_error_panic_hook::set_once(); + run_test_vote().await; + } + + #[wasm_bindgen_test] + async fn test_get_schema_changes() { + console_error_panic_hook::set_once(); + run_test_get_schema_changes().await; + } +} diff --git a/typesense/tests/derive/lib.rs b/typesense/tests/derive/lib.rs index e0561b0..8b50c7c 100644 --- a/typesense/tests/derive/lib.rs +++ b/typesense/tests/derive/lib.rs @@ -1,5 +1,5 @@ -#[cfg(feature = "typesense_derive")] +#[cfg(feature = "derive")] mod compile; -#[cfg(feature = "typesense_derive")] +#[cfg(feature = "derive")] mod derive_collection_schema_test; From 09cfdfdbb8deb616d95f0077a14a5f1a8a89e04b Mon Sep 17 00:00:00 2001 From: Hayden Hung Hoang Date: Sat, 23 Aug 2025 11:48:23 +0700 Subject: [PATCH 21/21] refactor: remove `Arc` in `Arc Alias<'a> { &self, ) -> Result> { let params = collections_api::GetAliasParams { - alias_name: 
self.name.to_string(), + alias_name: self.name.to_owned(), }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { collections_api::get_alias(&config, params_for_move).await } }) @@ -47,7 +46,7 @@ impl<'a> Alias<'a> { alias_name: self.name.to_string(), }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { collections_api::delete_alias(&config, params_for_move).await } }) diff --git a/typesense/src/client/aliases.rs b/typesense/src/client/aliases.rs index 4420d94..94c3852 100644 --- a/typesense/src/client/aliases.rs +++ b/typesense/src/client/aliases.rs @@ -3,7 +3,6 @@ //! An `Aliases` instance is created via the main `client.aliases()` method. use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{collections_api, configuration}, models, @@ -41,7 +40,7 @@ impl<'a> Aliases<'a> { collection_alias_schema: Some(schema), }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { collections_api::upsert_alias(&config, params_for_move).await } }) @@ -53,7 +52,7 @@ impl<'a> Aliases<'a> { &self, ) -> Result> { self.client - .execute(|config: Arc| async move { + .execute(|config: configuration::Configuration| async move { collections_api::get_aliases(&config).await }) .await diff --git a/typesense/src/client/analytics/events.rs b/typesense/src/client/analytics/events.rs index b07af68..9d68924 100644 --- a/typesense/src/client/analytics/events.rs +++ b/typesense/src/client/analytics/events.rs @@ -3,7 +3,6 @@ //! An `Events` instance is created via the `Client::analytics().events()` method. use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{analytics_api, configuration}, models, @@ -37,7 +36,7 @@ impl<'a> Events<'a> { analytics_event_create_schema: schema, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { analytics_api::create_analytics_event(&config, params_for_move).await } }) diff --git a/typesense/src/client/analytics/rule.rs b/typesense/src/client/analytics/rule.rs index b1c1372..1496947 100644 --- a/typesense/src/client/analytics/rule.rs +++ b/typesense/src/client/analytics/rule.rs @@ -3,7 +3,6 @@ //! An `Rule` instance is created via the `Client::analytics().rule("rule_name")` method. use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{analytics_api, configuration}, models, @@ -31,7 +30,7 @@ impl<'a> Rule<'a> { rule_name: self.rule_name.to_string(), }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { analytics_api::retrieve_analytics_rule(&config, params_for_move).await } }) @@ -47,7 +46,7 @@ impl<'a> Rule<'a> { rule_name: self.rule_name.to_string(), }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { analytics_api::delete_analytics_rule(&config, params_for_move).await } }) diff --git a/typesense/src/client/analytics/rules.rs b/typesense/src/client/analytics/rules.rs index adfbd55..bca50bb 100644 --- a/typesense/src/client/analytics/rules.rs +++ b/typesense/src/client/analytics/rules.rs @@ -3,7 +3,6 @@ //! 
An `Rules` instance is created via the `Client::analytics().rules()` method. use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{analytics_api, configuration}, models, @@ -34,7 +33,7 @@ impl<'a> Rules<'a> { analytics_rule_schema: schema, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { analytics_api::create_analytics_rule(&config, params_for_move).await } }) @@ -56,7 +55,7 @@ impl<'a> Rules<'a> { analytics_rule_upsert_schema: schema, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { analytics_api::upsert_analytics_rule(&config, params_for_move).await } }) @@ -71,7 +70,7 @@ impl<'a> Rules<'a> { Error, > { self.client - .execute(|config: Arc| async move { + .execute(|config: configuration::Configuration| async move { analytics_api::retrieve_analytics_rules(&config).await }) .await diff --git a/typesense/src/client/collection/document.rs b/typesense/src/client/collection/document.rs index 28a5d78..afa7357 100644 --- a/typesense/src/client/collection/document.rs +++ b/typesense/src/client/collection/document.rs @@ -6,7 +6,6 @@ use crate::{Client, Error}; use serde::{Serialize, de::DeserializeOwned}; -use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, models, @@ -54,7 +53,7 @@ where let result_value = self .client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::get_document(&config, params_for_move).await } }) @@ -120,7 +119,7 @@ where let result_value = self .client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::update_document(&config, params_for_move).await } }) @@ -143,7 +142,7 @@ where let result_value = self .client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::delete_document(&config, params_for_move).await } }) diff --git a/typesense/src/client/collection/documents.rs b/typesense/src/client/collection/documents.rs index 9aaca7c..8be2c4f 100644 --- a/typesense/src/client/collection/documents.rs +++ b/typesense/src/client/collection/documents.rs @@ -7,7 +7,6 @@ use crate::models::SearchResult; use crate::{Client, Error}; use serde::{Serialize, de::DeserializeOwned}; -use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, models::{ @@ -61,7 +60,7 @@ where dirty_values: params.unwrap_or_default().dirty_values, // Or expose this as an argument if needed }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::index_document(&config, params_for_move).await } }) @@ -132,7 +131,7 @@ where }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::import_documents(&config, params_for_move).await } }) @@ -155,7 +154,7 @@ where }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::export_documents(&config, params_for_move).await } }) @@ -179,7 +178,7 @@ where truncate: params.truncate, }; self.client - .execute(|config: Arc| { + 
.execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::delete_documents(&config, params_for_move).await } }) @@ -203,7 +202,7 @@ where body: document, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::update_documents(&config, params_for_move).await } }) @@ -296,7 +295,7 @@ where let raw_result = self .client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = search_params.clone(); async move { documents_api::search_collection(&config, params_for_move).await } }) diff --git a/typesense/src/client/collection/mod.rs b/typesense/src/client/collection/mod.rs index 9644ddf..69c07e7 100644 --- a/typesense/src/client/collection/mod.rs +++ b/typesense/src/client/collection/mod.rs @@ -12,8 +12,7 @@ use crate::{Client, Error}; use search_override::SearchOverride; use search_overrides::SearchOverrides; -use serde::{de::DeserializeOwned, Serialize}; -use std::sync::Arc; +use serde::{Serialize, de::DeserializeOwned}; use synonym::Synonym; use synonyms::Synonyms; use typesense_codegen::{ @@ -106,7 +105,7 @@ where }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { collections_api::get_collection(&config, params_for_move).await } }) @@ -124,7 +123,7 @@ where collection_name: self.collection_name.to_string(), }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { collections_api::delete_collection(&config, params_for_move).await } }) @@ -144,7 +143,7 @@ where collection_update_schema: update_schema, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { collections_api::update_collection(&config, params_for_move).await } }) diff --git a/typesense/src/client/collection/search_override.rs b/typesense/src/client/collection/search_override.rs index 5648e6d..78fd4f6 100644 --- a/typesense/src/client/collection/search_override.rs +++ b/typesense/src/client/collection/search_override.rs @@ -3,7 +3,6 @@ //! An instance of `SearchOverride` is created via the `Client::collection("collection_name").search_override("search_override_id")` method. use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, models, @@ -38,7 +37,7 @@ impl<'a> SearchOverride<'a> { }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::get_search_override(&config, params_for_move).await } }) @@ -56,7 +55,7 @@ impl<'a> SearchOverride<'a> { }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::delete_search_override(&config, params_for_move).await } }) diff --git a/typesense/src/client/collection/search_overrides.rs b/typesense/src/client/collection/search_overrides.rs index b439358..54983ee 100644 --- a/typesense/src/client/collection/search_overrides.rs +++ b/typesense/src/client/collection/search_overrides.rs @@ -3,7 +3,6 @@ //! An instance of `SearchOverrides` is created via the `Client::collection("collection_name").search_overrides()` method. 
use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{configuration, documents_api}, models, @@ -44,7 +43,7 @@ impl<'a> SearchOverrides<'a> { search_override_schema: schema, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::upsert_search_override(&config, params_for_move).await } }) @@ -61,7 +60,7 @@ impl<'a> SearchOverrides<'a> { }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { documents_api::get_search_overrides(&config, params_for_move).await } }) diff --git a/typesense/src/client/collection/synonym.rs b/typesense/src/client/collection/synonym.rs index 847b7dd..aeb466c 100644 --- a/typesense/src/client/collection/synonym.rs +++ b/typesense/src/client/collection/synonym.rs @@ -3,7 +3,6 @@ //! An instance of `Synonym` is created via the `client.collection("collection_name").synonym("synonym_id")` method. use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{configuration, synonyms_api}, models, @@ -38,7 +37,7 @@ impl<'a> Synonym<'a> { }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { synonyms_api::get_search_synonym(&config, params_for_move).await } }) @@ -56,7 +55,7 @@ impl<'a> Synonym<'a> { }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { synonyms_api::delete_search_synonym(&config, params_for_move).await } }) diff --git a/typesense/src/client/collection/synonyms.rs b/typesense/src/client/collection/synonyms.rs index 513fecc..1562f72 100644 --- a/typesense/src/client/collection/synonyms.rs +++ b/typesense/src/client/collection/synonyms.rs @@ -3,7 +3,6 @@ //! An instance of `Synonyms` is created via the `client.collection("collection_name").synonyms()` method. use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{configuration, synonyms_api}, models, @@ -42,7 +41,7 @@ impl<'a> Synonyms<'a> { search_synonym_schema: schema, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { synonyms_api::upsert_search_synonym(&config, params_for_move).await } }) @@ -58,7 +57,7 @@ impl<'a> Synonyms<'a> { }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { synonyms_api::get_search_synonyms(&config, params_for_move).await } }) diff --git a/typesense/src/client/collections.rs b/typesense/src/client/collections.rs index 0036c3d..7ddbcde 100644 --- a/typesense/src/client/collections.rs +++ b/typesense/src/client/collections.rs @@ -3,7 +3,6 @@ //! A `Collections` instance is created via the main `client.collections()` method. 
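Since this file introduces the top-level `collections()` entry point, a short, hedged sketch of how it is typically driven (the `Client` re-export path and the surrounding async setup are assumed; error handling is reduced to a debug print):

```rust
// Sketch, not part of the diff: lists collection summaries through the
// `client.collections()` handle added in this file.
async fn print_collection_names(client: &typesense::Client) {
    // `retrieve()` wraps `collections_api::get_collections` (see the hunk that
    // follows) with the client's multi-node failover in `Client::execute`.
    match client.collections().retrieve().await {
        Ok(collections) => {
            for collection in collections {
                println!("collection: {}", collection.name);
            }
        }
        Err(e) => eprintln!("listing collections failed: {e:?}"),
    }
}
```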
use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{collections_api, configuration}, models, @@ -39,7 +38,7 @@ impl<'a> Collections<'a> { }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { collections_api::create_collection(&config, params_for_move).await } }) @@ -54,7 +53,7 @@ impl<'a> Collections<'a> { &self, ) -> Result, Error> { self.client - .execute(|config: Arc| async move { + .execute(|config: configuration::Configuration| async move { collections_api::get_collections(&config).await }) .await diff --git a/typesense/src/client/conversations/model.rs b/typesense/src/client/conversations/model.rs index 33bf34e..ea8e5ec 100644 --- a/typesense/src/client/conversations/model.rs +++ b/typesense/src/client/conversations/model.rs @@ -3,7 +3,6 @@ //! An instance of `Model` is created via the `Conversations::model()` method. use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{configuration, conversations_api}, models, @@ -34,7 +33,7 @@ impl<'a> Model<'a> { model_id: self.model_id.to_string(), }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { conversations_api::retrieve_conversation_model(&config, params_for_move).await @@ -59,7 +58,7 @@ impl<'a> Model<'a> { conversation_model_update_schema: schema, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { conversations_api::update_conversation_model(&config, params_for_move).await @@ -79,7 +78,7 @@ impl<'a> Model<'a> { model_id: self.model_id.to_string(), }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { conversations_api::delete_conversation_model(&config, params_for_move).await diff --git a/typesense/src/client/conversations/models.rs b/typesense/src/client/conversations/models.rs index e093a5b..a5458b0 100644 --- a/typesense/src/client/conversations/models.rs +++ b/typesense/src/client/conversations/models.rs @@ -3,7 +3,6 @@ //! An instance of `Models` is created via the `Conversations::models()` method. use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{configuration, conversations_api}, models, @@ -37,7 +36,7 @@ impl<'a> Models<'a> { conversation_model_create_schema: schema, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { conversations_api::create_conversation_model(&config, params_for_move).await @@ -54,7 +53,7 @@ impl<'a> Models<'a> { Error, > { self.client - .execute(|config: Arc| async move { + .execute(|config: configuration::Configuration| async move { conversations_api::retrieve_all_conversation_models(&config).await }) .await diff --git a/typesense/src/client/key.rs b/typesense/src/client/key.rs index 77f7e27..fb2701d 100644 --- a/typesense/src/client/key.rs +++ b/typesense/src/client/key.rs @@ -3,7 +3,6 @@ //! A `Key` instance is created via the `Client::key(key_id)` method. 
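Analogously, a hedged sketch for the key-management handles defined in this file and in `keys.rs` below. The numeric key id is illustrative, and the assumption that `ApiKeysResponse` exposes its entries as a `keys` vector comes from the generated model docs, not from this hunk.

```rust
// Sketch, not part of the diff: inspects and revokes API keys.
// `retrieve()` / `delete()` map onto `keys_api::get_key` / `keys_api::delete_key`
// in the hunks that follow; the key id (42) is illustrative.
async fn inspect_and_revoke_key(client: &typesense::Client) {
    // List metadata for every API key in the cluster.
    if let Ok(all_keys) = client.keys().retrieve().await {
        println!("{} keys found", all_keys.keys.len());
    }

    // Fetch a single key by id, then revoke it.
    let key = client.key(42);
    if key.retrieve().await.is_ok() {
        let _ = key.delete().await;
    }
}
```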
use crate::{Client, Error}; -use std::sync::Arc; use typesense_codegen::{ apis::{configuration, keys_api}, models, @@ -32,7 +31,7 @@ impl<'a> Key<'a> { key_id: self.key_id, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { keys_api::get_key(&config, params_for_move).await } }) @@ -47,7 +46,7 @@ impl<'a> Key<'a> { key_id: self.key_id, }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { keys_api::delete_key(&config, params_for_move).await } }) diff --git a/typesense/src/client/keys.rs b/typesense/src/client/keys.rs index 18906c8..12b791c 100644 --- a/typesense/src/client/keys.rs +++ b/typesense/src/client/keys.rs @@ -3,13 +3,12 @@ //! An `Keys` instance is created via the `Client::keys()` method. use crate::{ - models::{self, ScopedKeyParameters}, Client, Error, + models::{self, ScopedKeyParameters}, }; -use base64::{engine::general_purpose::STANDARD as Base64Engine, Engine}; +use base64::{Engine, engine::general_purpose::STANDARD as Base64Engine}; use hmac::{Hmac, Mac}; use sha2::Sha256; -use std::sync::Arc; use typesense_codegen::apis::{configuration, keys_api}; /// Provides methods for managing a collection of Typesense API keys. @@ -40,7 +39,7 @@ impl<'a> Keys<'a> { api_key_schema: Some(schema), }; self.client - .execute(|config: Arc| { + .execute(|config: configuration::Configuration| { let params_for_move = params.clone(); async move { keys_api::create_key(&config, params_for_move).await } }) @@ -50,7 +49,7 @@ impl<'a> Keys<'a> { /// Lists all API keys and their metadata. pub async fn retrieve(&self) -> Result> { self.client - .execute(|config: Arc| async move { + .execute(|config: configuration::Configuration| async move { keys_api::get_keys(&config).await }) .await diff --git a/typesense/src/client/mod.rs b/typesense/src/client/mod.rs index 3b9c56b..13aee14 100644 --- a/typesense/src/client/mod.rs +++ b/typesense/src/client/mod.rs @@ -303,9 +303,9 @@ impl Client { /// The core execution method that handles multi-node failover and retries. /// This internal method is called by all public API methods. - pub(super) async fn execute(&self, api_call: F) -> Result> + pub(super) async fn execute<'a, F, Fut, T, E>(&'a self, api_call: F) -> Result> where - F: Fn(Arc) -> Fut, + F: Fn(configuration::Configuration) -> Fut, Fut: Future>>, E: std::fmt::Debug + 'static, apis::Error: std::error::Error + 'static, @@ -324,11 +324,9 @@ impl Client { #[cfg(target_arch = "wasm32")] let http_client = reqwest::Client::builder() - // .timeout() is not available on wasm32 .build() .expect("Failed to build reqwest client"); - // This client handles transient retries (e.g. network blips) on the *current node*. #[cfg(not(target_arch = "wasm32"))] let http_client = ReqwestMiddlewareClientBuilder::new( reqwest::Client::builder() @@ -339,8 +337,8 @@ impl Client { .with(RetryTransientMiddleware::new_with_policy(self.retry_policy)) .build(); - // Create a temporary, single-node config for the generated API function. - let gen_config = Arc::new(configuration::Configuration { + // Create the temporary config on the stack for this attempt. 
+            let gen_config = configuration::Configuration {
                 base_path: node_url
                     .to_string()
                     .strip_suffix('/')
@@ -352,27 +350,24 @@ impl Client {
                 }),
                 client: http_client,
                 ..Default::default()
-            });
+            };
 
             match api_call(gen_config).await {
                 Ok(response) => {
-                    self.set_node_health(&node_arc, true); // Mark as healthy on success.
+                    self.set_node_health(&node_arc, true);
                     return Ok(response);
                 }
                 Err(e) => {
                     if is_retriable(&e) {
-                        self.set_node_health(&node_arc, false); // Mark as unhealthy on retriable error.
+                        self.set_node_health(&node_arc, false);
                         last_api_error = Some(e);
-                        // Continue loop to try the next node.
                     } else {
-                        // Non-retriable error (e.g., 404 Not Found), fail fast.
                         return Err(e.into());
                     }
                 }
             }
         }
 
-        // If the loop finishes, all nodes have failed.
         Err(crate::Error::AllNodesFailed {
             source: last_api_error
                 .expect("No nodes were available to try, or all errors were non-retriable."),
diff --git a/typesense/src/client/multi_search.rs b/typesense/src/client/multi_search.rs
index 5072839..23908ff 100644
--- a/typesense/src/client/multi_search.rs
+++ b/typesense/src/client/multi_search.rs
@@ -3,11 +3,10 @@
 //! A `MultiSearch` instance is created via the main `Client::multi_search()` method.
 
 use crate::{
-    models::SearchResult, traits::MultiSearchResultExt, Client, Error, MultiSearchParseError,
-    MultiSearchSearchesParameter,
+    Client, Error, MultiSearchParseError, MultiSearchSearchesParameter, models::SearchResult,
+    traits::MultiSearchResultExt,
 };
 use serde::de::DeserializeOwned;
-use std::sync::Arc;
 use typesense_codegen::{
     apis::{
         configuration::Configuration,
@@ -170,7 +169,7 @@ impl<'a> MultiSearch<'a> {
 
         let raw_result = self
             .client
-            .execute(|config: Arc<Configuration>| {
+            .execute(|config: Configuration| {
                 let params_for_move: MultiSearchParams = multi_search_params.clone();
                 async move { documents_api::multi_search(&config, params_for_move).await }
             })
@@ -289,7 +288,7 @@ impl<'a> MultiSearch<'a> {
         // Execute the request to get the raw JSON value
         let raw_result = self
             .client
-            .execute(|config: Arc<Configuration>| {
+            .execute(|config: Configuration| {
                 let params_for_move = multi_search_params.clone();
                 async move { documents_api::multi_search(&config, params_for_move).await }
             })
@@ -303,7 +302,7 @@ impl<'a> MultiSearch<'a> {
         let raw_search_result: raw_models::SearchResult =
             serde_json::from_value(json_value).map_err(Error::from)?;
 
-        // Then, use your existing constructor to convert the raw result to the typed one,
+        // Then, use our existing constructor to convert the raw result to the typed one,
         // specifying `serde_json::Value` as the document type.
         SearchResult::<serde_json::Value>::from_raw(raw_search_result).map_err(Error::from)
     }
diff --git a/typesense/src/client/operations.rs b/typesense/src/client/operations.rs
index d784fc9..7e71321 100644
--- a/typesense/src/client/operations.rs
+++ b/typesense/src/client/operations.rs
@@ -3,7 +3,6 @@
 //! An `Operations` instance is created via the main `Client::operations()` method.
 
 use crate::{Client, Error};
-use std::sync::Arc;
 use typesense_codegen::{
     apis::{configuration, debug_api, health_api, operations_api},
     models,
@@ -28,7 +27,7 @@ impl<'a> Operations<'a> {
     /// to the specific node that responded successfully.
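// --- Editorial sketch (not part of the patch) ---
// A self-contained miniature of the failover loop `execute` implements above: try each
// node in turn with a fresh per-attempt configuration, remember the last retriable
// error, and fail fast on non-retriable ones. Every name below is local to the sketch;
// node-health bookkeeping and the per-node retry middleware are omitted.
use std::future::Future;

#[derive(Debug)]
enum SketchError {
    Retriable(String),
    NonRetriable(String),
    AllNodesFailed(Option<String>),
}

fn retriable(e: &SketchError) -> bool {
    matches!(e, SketchError::Retriable(_))
}

async fn execute_with_failover<F, Fut, T>(node_urls: &[String], api_call: F) -> Result<T, SketchError>
where
    // `Fn` rather than `FnOnce`, so the closure can be invoked once per node attempt.
    F: Fn(String) -> Fut,
    Fut: Future<Output = Result<T, SketchError>>,
{
    let mut last_error = None;
    for node_url in node_urls {
        match api_call(node_url.clone()).await {
            Ok(response) => return Ok(response),                             // this node answered: done
            Err(e) if retriable(&e) => last_error = Some(format!("{e:?}")),  // try the next node
            Err(e) => return Err(e),                                         // e.g. a 404: fail fast
        }
    }
    Err(SketchError::AllNodesFailed(last_error))
}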
     pub async fn debug(&self) -> Result> {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| async move {
+            .execute(|config: configuration::Configuration| async move {
                 debug_api::debug(&config).await
             })
             .await
@@ -38,7 +37,7 @@ impl<'a> Operations<'a> {
     /// When a node is running out of memory / disk, the API response will have an additional resource_error field that's set to either `OUT_OF_DISK`` or `OUT_OF_MEMORY``.
     pub async fn health(&self) -> Result> {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| async move {
+            .execute(|config: configuration::Configuration| async move {
                 health_api::health(&config).await
             })
             .await
@@ -74,7 +73,7 @@ impl<'a> Operations<'a> {
         &self,
     ) -> Result> {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| async move {
+            .execute(|config: configuration::Configuration| async move {
                 operations_api::retrieve_metrics(&config).await
             })
             .await
@@ -101,7 +100,7 @@ impl<'a> Operations<'a> {
         &self,
     ) -> Result> {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| async move {
+            .execute(|config: configuration::Configuration| async move {
                 operations_api::retrieve_api_stats(&config).await
             })
             .await
@@ -114,7 +113,7 @@ impl<'a> Operations<'a> {
         params: operations_api::TakeSnapshotParams,
     ) -> Result> {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| {
+            .execute(|config: configuration::Configuration| {
                 let params_for_move = params.clone();
                 async move { operations_api::take_snapshot(&config, params_for_move).await }
             })
@@ -125,7 +124,7 @@ impl<'a> Operations<'a> {
     /// The follower node that you run this operation against will become the new leader, once this command succeeds.
     pub async fn vote(&self) -> Result> {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| async move {
+            .execute(|config: configuration::Configuration| async move {
                 operations_api::vote(&config).await
             })
             .await
@@ -136,7 +135,7 @@ impl<'a> Operations<'a> {
         &self,
     ) -> Result, Error> {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| async move {
+            .execute(|config: configuration::Configuration| async move {
                 operations_api::get_schema_changes(&config).await
             })
             .await
diff --git a/typesense/src/client/preset.rs b/typesense/src/client/preset.rs
index 659b7d3..7b48f5c 100644
--- a/typesense/src/client/preset.rs
+++ b/typesense/src/client/preset.rs
@@ -3,7 +3,6 @@
 //! A `Preset` instance is created via the main `Client::preset(id)` method.
 
 use crate::{Client, Error};
-use std::sync::Arc;
 use typesense_codegen::{
     apis::{configuration, presets_api},
     models,
@@ -31,7 +30,7 @@ impl<'a> Preset<'a> {
             preset_id: self.preset_id.to_string(),
         };
         self.client
-            .execute(|config: Arc<configuration::Configuration>| {
+            .execute(|config: configuration::Configuration| {
                 let params_for_move = params.clone();
                 async move { presets_api::retrieve_preset(&config, params_for_move).await }
             })
@@ -46,7 +45,7 @@ impl<'a> Preset<'a> {
             preset_id: self.preset_id.to_string(),
         };
         self.client
-            .execute(|config: Arc<configuration::Configuration>| {
+            .execute(|config: configuration::Configuration| {
                 let params_for_move = params.clone();
                 async move { presets_api::delete_preset(&config, params_for_move).await }
             })
diff --git a/typesense/src/client/presets.rs b/typesense/src/client/presets.rs
index 29f1966..dee82d0 100644
--- a/typesense/src/client/presets.rs
+++ b/typesense/src/client/presets.rs
@@ -5,7 +5,6 @@
 //! A `Presets` instance is created via the main `Client::presets()` method.
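// --- Editorial usage sketch (not part of the patch) ---
// Exercises two of the `Operations` wrappers touched above. `client` is assumed to be a
// configured `typesense::Client`; the `get_schema_changes` wrapper name and the `ok`
// field on the health response are assumptions taken from the generated API and the
// Typesense spec, not from these hunks.
async fn operations_sketch(client: &typesense::Client) {
    if let Ok(health) = client.operations().health().await {
        println!("node healthy: {}", health.ok);
    }
    if let Ok(changes) = client.operations().get_schema_changes().await {
        println!("{} schema change(s) in progress", changes.len());
    }
}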
 use crate::{Client, Error};
-use std::sync::Arc;
 use typesense_codegen::{
     apis::{configuration, presets_api},
     models,
@@ -29,7 +28,7 @@ impl<'a> Presets<'a> {
         &self,
     ) -> Result> {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| async move {
+            .execute(|config: configuration::Configuration| async move {
                 presets_api::retrieve_all_presets(&config).await
             })
             .await
@@ -50,7 +49,7 @@ impl<'a> Presets<'a> {
             preset_upsert_schema: schema,
         };
         self.client
-            .execute(|config: Arc<configuration::Configuration>| {
+            .execute(|config: configuration::Configuration| {
                 let params_for_move = params.clone();
                 async move { presets_api::upsert_preset(&config, params_for_move).await }
             })
diff --git a/typesense/src/client/stemming/dictionaries.rs b/typesense/src/client/stemming/dictionaries.rs
index 480e252..8ca9abd 100644
--- a/typesense/src/client/stemming/dictionaries.rs
+++ b/typesense/src/client/stemming/dictionaries.rs
@@ -3,7 +3,6 @@
 //! A `Dictionaries` instance is created via the `Client::stemming().dictionaries()` method.
 
 use crate::client::{Client, Error};
-use std::sync::Arc;
 use typesense_codegen::{
     apis::{configuration, stemming_api},
     models,
@@ -39,7 +38,7 @@ impl<'a> Dictionaries<'a> {
             body: dictionary_jsonl,
         };
         self.client
-            .execute(|config: Arc<configuration::Configuration>| {
+            .execute(|config: configuration::Configuration| {
                 let params_for_move = params.clone();
                 async move { stemming_api::import_stemming_dictionary(&config, params_for_move).await }
             })
@@ -54,7 +53,7 @@ impl<'a> Dictionaries<'a> {
         Error,
     > {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| async move {
+            .execute(|config: configuration::Configuration| async move {
                 stemming_api::list_stemming_dictionaries(&config).await
             })
             .await
diff --git a/typesense/src/client/stemming/dictionary.rs b/typesense/src/client/stemming/dictionary.rs
index 223c029..436223c 100644
--- a/typesense/src/client/stemming/dictionary.rs
+++ b/typesense/src/client/stemming/dictionary.rs
@@ -3,7 +3,6 @@
 //! An instance of `Dictionary` is created via the `Client::stemming().dictionary()` method.
 
 use crate::client::{Client, Error};
-use std::sync::Arc;
 use typesense_codegen::{
     apis::{configuration, stemming_api},
     models,
@@ -34,7 +33,7 @@ impl<'a> Dictionary<'a> {
             dictionary_id: self.dictionary_id.to_string(),
         };
         self.client
-            .execute(|config: Arc<configuration::Configuration>| {
+            .execute(|config: configuration::Configuration| {
                 let params_for_move = params.clone();
                 async move { stemming_api::get_stemming_dictionary(&config, params_for_move).await }
             })
diff --git a/typesense/src/client/stopword.rs b/typesense/src/client/stopword.rs
index 86800a8..3a95996 100644
--- a/typesense/src/client/stopword.rs
+++ b/typesense/src/client/stopword.rs
@@ -3,7 +3,6 @@
 //! An instance of `Stopword` is created via the `Client::stopword()` method.
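// --- Editorial usage sketch (not part of the patch) ---
// Touches the `Presets` and stemming wrappers edited above. `client` is assumed to be a
// configured `typesense::Client`; `retrieve_all`, `list` and `get` are assumed wrapper
// names where the hunks only show the generated functions they forward to, and the
// `presets` field on the retrieve-all response follows the API spec.
async fn presets_and_stemming_sketch(client: &typesense::Client) {
    if let Ok(all) = client.presets().retrieve_all().await {
        println!("{} search preset(s) defined", all.presets.len());
    }
    if let Ok(dictionaries) = client.stemming().dictionaries().list().await {
        println!("stemming dictionaries: {dictionaries:?}");
    }
    // Fetch one dictionary by id; the id is illustrative.
    let _dictionary = client.stemming().dictionary("irregular-plurals").get().await;
}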
 use crate::{Client, Error};
-use std::sync::Arc;
 use typesense_codegen::{
     apis::{configuration, stopwords_api},
     models,
@@ -32,7 +31,7 @@ impl<'a> Stopword<'a> {
             set_id: self.set_id.to_string(),
         };
         self.client
-            .execute(|config: Arc<configuration::Configuration>| {
+            .execute(|config: configuration::Configuration| {
                 let params_for_move = params.clone();
                 async move { stopwords_api::retrieve_stopwords_set(&config, params_for_move).await }
             })
@@ -48,7 +47,7 @@ impl<'a> Stopword<'a> {
             set_id: self.set_id.to_string(),
         };
         self.client
-            .execute(|config: Arc<configuration::Configuration>| {
+            .execute(|config: configuration::Configuration| {
                 let params_for_move = params.clone();
                 async move { stopwords_api::delete_stopwords_set(&config, params_for_move).await }
             })
diff --git a/typesense/src/client/stopwords.rs b/typesense/src/client/stopwords.rs
index d9374e6..12e2f36 100644
--- a/typesense/src/client/stopwords.rs
+++ b/typesense/src/client/stopwords.rs
@@ -3,7 +3,6 @@
 //! A `Stopwords` instance is created via the main `Client::stopwords()` method.
 
 use crate::{Client, Error};
-use std::sync::Arc;
 use typesense_codegen::{
     apis::{configuration, stopwords_api},
     models,
@@ -37,7 +36,7 @@ impl<'a> Stopwords<'a> {
             stopwords_set_upsert_schema: schema,
         };
         self.client
-            .execute(|config: Arc<configuration::Configuration>| {
+            .execute(|config: configuration::Configuration| {
                 let params_for_move = params.clone();
                 async move { stopwords_api::upsert_stopwords_set(&config, params_for_move).await }
             })
@@ -52,7 +51,7 @@ impl<'a> Stopwords<'a> {
         Error,
     > {
         self.client
-            .execute(|config: Arc<configuration::Configuration>| async move {
+            .execute(|config: configuration::Configuration| async move {
                 stopwords_api::retrieve_stopwords_sets(&config).await
             })
             .await
diff --git a/typesense/src/models/search_result.rs b/typesense/src/models/search_result.rs
index 62c69be..aca1138 100644
--- a/typesense/src/models/search_result.rs
+++ b/typesense/src/models/search_result.rs
@@ -1,16 +1,14 @@
 //! Contains the generic `SearchResult` and `SearchResultHit` structs
 
-use serde::{de::DeserializeOwned, Deserialize, Serialize};
+use serde::{Deserialize, Serialize, de::DeserializeOwned};
 use serde_json::Value;
 use typesense_codegen::models as raw_models;
 
-/// Represents a single search result hit, with the document deserialized into a strongly-typed struct `T`.
+/// Represents a single search result hit, with the document deserialized into a strongly-typed struct `D`.
 ///
-/// This struct is generic over the document type `T`, which must be deserializable from JSON.
+/// This struct is generic over the document type `D`, which must be deserializable from JSON.
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-// Add this line to help the derive macro with the generic bound.
-#[serde(bound(serialize = "T: Serialize", deserialize = "T: DeserializeOwned"))]
-pub struct SearchResultHit<T> {
+pub struct SearchResultHit<D> {
     /// (Deprecated) Contains highlighted portions of the search fields
     #[serde(rename = "highlights", skip_serializing_if = "Option::is_none")]
     pub highlights: Option>,
 
     /// Highlighted version of the matching document
     #[serde(rename = "highlight", skip_serializing_if = "Option::is_none")]
     pub highlight: Option>,
 
-    /// The full document that was matched, deserialized into type `T`.
+    /// The full document that was matched, deserialized into type `D`.
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub document: Option<T>,
+    pub document: Option<D>,
 
     /// The score of the text match.
#[serde(rename = "text_match", skip_serializing_if = "Option::is_none")] @@ -45,15 +43,13 @@ pub struct SearchResultHit { /// Represents the full response from a Typesense search query, containing strongly-typed hits. /// -/// This struct is generic over the document type `T`. It is the return type of the +/// This struct is generic over the document type `D`. It is the return type of the /// `documents().search()` method. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -// Add this line to help the derive macro with the generic bound. -#[serde(bound(serialize = "T: Serialize", deserialize = "T: DeserializeOwned"))] -pub struct SearchResult { - /// The search result hits, with documents deserialized into type `T`. +pub struct SearchResult { + /// The search result hits, with documents deserialized into type `D`. #[serde(default, skip_serializing_if = "Option::is_none")] - pub hits: Option>>, + pub hits: Option>>, /// The number of documents found. #[serde(skip_serializing_if = "Option::is_none")] @@ -96,21 +92,21 @@ pub struct SearchResult { pub conversation: Option>, } -impl SearchResult +impl SearchResult where - T: DeserializeOwned, + D: DeserializeOwned, { - /// Transforms a raw, non-generic `SearchResult` from the API into a strongly-typed `SearchResult`. + /// Transforms a raw, non-generic `SearchResult` from the API into a strongly-typed `SearchResult`. pub(crate) fn from_raw( raw_result: raw_models::SearchResult, ) -> Result { let typed_hits = match raw_result.hits { Some(raw_hits) => { - let hits_result: Result>, _> = raw_hits + let hits_result: Result>, _> = raw_hits .into_iter() .map(|raw_hit| { - // Map each raw hit to a Result, _> - let document: Result, _> = raw_hit + // Map each raw hit to a Result, _> + let document: Result, _> = raw_hit .document .map(|doc_value| serde_json::from_value(doc_value)) .transpose(); @@ -151,37 +147,36 @@ where // This impl block specifically targets `SearchResult`. // The methods inside will only be available on a search result of that exact type. impl SearchResult { - /// Attempts to convert a `SearchResult` into a `SearchResult`. + /// Attempts to convert a `SearchResult` into a `SearchResult`. /// /// This method is useful after a `perform_union` call where you know all resulting - /// documents share the same schema and can be deserialized into a single concrete type `T`. + /// documents share the same schema and can be deserialized into a single concrete type `D`. /// /// It iterates through each hit and tries to deserialize its `document` field. If any - /// document fails to deserialize into type `T`, the entire conversion fails. + /// document fails to deserialize into type `D`, the entire conversion fails. /// /// # Type Parameters /// - /// * `T` - The concrete, `DeserializeOwned` type you want to convert the documents into. + /// * `D` - The concrete, `DeserializeOwned` type you want to convert the documents into. /// /// # Errors /// /// Returns a `serde_json::Error` if any document in the hit list cannot be successfully - /// deserialized into `T`. - pub fn try_into_typed(self) -> Result, serde_json::Error> { + /// deserialized into `D`. + pub fn try_into_typed(self) -> Result, serde_json::Error> { // This logic is very similar to `from_raw`, but it converts between generic types // instead of from a raw model. 
         let typed_hits = match self.hits {
             Some(value_hits) => {
-                let hits_result: Result<Vec<SearchResultHit<T>>, _> = value_hits
+                let hits_result: Result<Vec<SearchResultHit<D>>, _> = value_hits
                     .into_iter()
                     .map(|value_hit| {
                         // `value_hit` here is `SearchResultHit<Value>`
-                        let document: Option<T> = match value_hit.document {
+                        let document: Option<D> = match value_hit.document {
                             Some(doc_value) => Some(serde_json::from_value(doc_value)?),
                             None => None,
                         };
-                        // Construct the new, strongly-typed hit.
                         Ok(SearchResultHit {
                             document,
                             highlights: value_hit.highlights,
@@ -199,7 +194,6 @@ impl SearchResult<Value> {
             None => None,
         };
 
-        // Construct the final, strongly-typed search result, carrying over all metadata.
         Ok(SearchResult {
             hits: typed_hits,
             found: self.found,
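// --- Editorial usage sketch (not part of the patch) ---
// Converting a `SearchResult` that still carries raw `serde_json::Value` documents
// (for example, the output of a union multi-search) into a concrete document type via
// `try_into_typed`, as implemented above. The `Book` struct and the `typesense::models`
// re-export path are illustrative assumptions.
use serde::{Deserialize, Serialize};
use typesense::models::SearchResult;

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
struct Book {
    id: String,
    title: String,
}

fn into_books(
    raw: SearchResult<serde_json::Value>,
) -> Result<SearchResult<Book>, serde_json::Error> {
    // Fails with the first document that cannot be deserialized into `Book`.
    raw.try_into_typed::<Book>()
}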