@@ -0,0 +1,11155 @@
+// NOTE: This file was generated by the ServiceGenerator.
+
+// ----------------------------------------------------------------------------
+// API:
+//   Cloud Vision API (vision/v1)
+// Description:
+//   Integrates Google Vision features, including image labeling, face, logo,
+//   and landmark detection, optical character recognition (OCR), and detection
+//   of explicit content, into applications.
+// Documentation:
+//   https://cloud.google.com/vision/
+
+#if GTLR_BUILT_AS_FRAMEWORK
+  #import "GTLR/GTLRObject.h"
+#else
+  #import "GTLRObject.h"
+#endif
+
+#if GTLR_RUNTIME_VERSION != 3000
+#error This file was generated by a different version of ServiceGenerator which is incompatible with this GTLR library source.
+#endif
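
The declarations below only model the API's JSON surface. For orientation, here is a minimal sketch of driving label detection end to end; GTLRVisionService and GTLRVisionQuery_ImagesAnnotate come from the companion generated files rather than this header, and `imageData` plus the API key are placeholders, not values this library supplies.

    GTLRVisionService *service = [[GTLRVisionService alloc] init];
    service.APIKey = @"YOUR_API_KEY";  // placeholder credential

    // GTLR maps `bytes` fields to NSData and base64-encodes them on the wire.
    GTLRVision_Image *image = [GTLRVision_Image object];
    image.content = imageData;  // NSData assumed to hold the raw image bytes

    GTLRVision_Feature *feature = [GTLRVision_Feature object];
    feature.type = kGTLRVision_Feature_Type_LabelDetection;

    GTLRVision_AnnotateImageRequest *request = [GTLRVision_AnnotateImageRequest object];
    request.image = image;
    request.features = @[ feature ];

    GTLRVision_BatchAnnotateImagesRequest *batch =
        [GTLRVision_BatchAnnotateImagesRequest object];
    batch.requests = @[ request ];

    GTLRVisionQuery_ImagesAnnotate *query =
        [GTLRVisionQuery_ImagesAnnotate queryWithObject:batch];
    [service executeQuery:query
        completionHandler:^(GTLRServiceTicket *ticket,
                            GTLRVision_BatchAnnotateImagesResponse *response,
                            NSError *error) {
      // response.responses pairs one AnnotateImageResponse with each request.
    }];
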
+
+@class GTLRVision_AnnotateImageRequest;
+@class GTLRVision_AnnotateImageResponse;
+@class GTLRVision_AsyncAnnotateFileRequest;
+@class GTLRVision_AsyncAnnotateFileResponse;
+@class GTLRVision_Block;
+@class GTLRVision_BoundingPoly;
+@class GTLRVision_Color;
+@class GTLRVision_ColorInfo;
+@class GTLRVision_CropHint;
+@class GTLRVision_CropHintsAnnotation;
+@class GTLRVision_CropHintsParams;
+@class GTLRVision_DetectedBreak;
+@class GTLRVision_DetectedLanguage;
+@class GTLRVision_DominantColorsAnnotation;
+@class GTLRVision_EntityAnnotation;
+@class GTLRVision_FaceAnnotation;
+@class GTLRVision_Feature;
+@class GTLRVision_GcsDestination;
+@class GTLRVision_GcsSource;
+@class GTLRVision_GoogleCloudVisionV1p1beta1AnnotateImageResponse;
+@class GTLRVision_GoogleCloudVisionV1p1beta1AsyncAnnotateFileResponse;
+@class GTLRVision_GoogleCloudVisionV1p1beta1Block;
+@class GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly;
+@class GTLRVision_GoogleCloudVisionV1p1beta1ColorInfo;
+@class GTLRVision_GoogleCloudVisionV1p1beta1CropHint;
+@class GTLRVision_GoogleCloudVisionV1p1beta1CropHintsAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p1beta1DominantColorsAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p1beta1EntityAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark;
+@class GTLRVision_GoogleCloudVisionV1p1beta1GcsDestination;
+@class GTLRVision_GoogleCloudVisionV1p1beta1GcsSource;
+@class GTLRVision_GoogleCloudVisionV1p1beta1ImageAnnotationContext;
+@class GTLRVision_GoogleCloudVisionV1p1beta1ImageProperties;
+@class GTLRVision_GoogleCloudVisionV1p1beta1InputConfig;
+@class GTLRVision_GoogleCloudVisionV1p1beta1LocalizedObjectAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p1beta1LocationInfo;
+@class GTLRVision_GoogleCloudVisionV1p1beta1NormalizedVertex;
+@class GTLRVision_GoogleCloudVisionV1p1beta1OutputConfig;
+@class GTLRVision_GoogleCloudVisionV1p1beta1Page;
+@class GTLRVision_GoogleCloudVisionV1p1beta1Paragraph;
+@class GTLRVision_GoogleCloudVisionV1p1beta1Position;
+@class GTLRVision_GoogleCloudVisionV1p1beta1Product;
+@class GTLRVision_GoogleCloudVisionV1p1beta1ProductKeyValue;
+@class GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResults;
+@class GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResultsGroupedResult;
+@class GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResultsResult;
+@class GTLRVision_GoogleCloudVisionV1p1beta1Property;
+@class GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p1beta1Symbol;
+@class GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak;
+@class GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedLanguage;
+@class GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationTextProperty;
+@class GTLRVision_GoogleCloudVisionV1p1beta1Vertex;
+@class GTLRVision_GoogleCloudVisionV1p1beta1WebDetection;
+@class GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebEntity;
+@class GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebImage;
+@class GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebLabel;
+@class GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebPage;
+@class GTLRVision_GoogleCloudVisionV1p1beta1Word;
+@class GTLRVision_GoogleCloudVisionV1p2beta1AnnotateImageResponse;
+@class GTLRVision_GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse;
+@class GTLRVision_GoogleCloudVisionV1p2beta1Block;
+@class GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly;
+@class GTLRVision_GoogleCloudVisionV1p2beta1ColorInfo;
+@class GTLRVision_GoogleCloudVisionV1p2beta1CropHint;
+@class GTLRVision_GoogleCloudVisionV1p2beta1CropHintsAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p2beta1DominantColorsAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark;
+@class GTLRVision_GoogleCloudVisionV1p2beta1GcsDestination;
+@class GTLRVision_GoogleCloudVisionV1p2beta1GcsSource;
+@class GTLRVision_GoogleCloudVisionV1p2beta1ImageAnnotationContext;
+@class GTLRVision_GoogleCloudVisionV1p2beta1ImageProperties;
+@class GTLRVision_GoogleCloudVisionV1p2beta1InputConfig;
+@class GTLRVision_GoogleCloudVisionV1p2beta1LocalizedObjectAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p2beta1LocationInfo;
+@class GTLRVision_GoogleCloudVisionV1p2beta1NormalizedVertex;
+@class GTLRVision_GoogleCloudVisionV1p2beta1OutputConfig;
+@class GTLRVision_GoogleCloudVisionV1p2beta1Page;
+@class GTLRVision_GoogleCloudVisionV1p2beta1Paragraph;
+@class GTLRVision_GoogleCloudVisionV1p2beta1Position;
+@class GTLRVision_GoogleCloudVisionV1p2beta1Product;
+@class GTLRVision_GoogleCloudVisionV1p2beta1ProductKeyValue;
+@class GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResults;
+@class GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResultsGroupedResult;
+@class GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResultsResult;
+@class GTLRVision_GoogleCloudVisionV1p2beta1Property;
+@class GTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p2beta1Symbol;
+@class GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak;
+@class GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage;
+@class GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationTextProperty;
+@class GTLRVision_GoogleCloudVisionV1p2beta1Vertex;
+@class GTLRVision_GoogleCloudVisionV1p2beta1WebDetection;
+@class GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebEntity;
+@class GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebImage;
+@class GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebLabel;
+@class GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebPage;
+@class GTLRVision_GoogleCloudVisionV1p2beta1Word;
+@class GTLRVision_GoogleCloudVisionV1p3beta1AnnotateImageResponse;
+@class GTLRVision_GoogleCloudVisionV1p3beta1AsyncAnnotateFileResponse;
+@class GTLRVision_GoogleCloudVisionV1p3beta1Block;
+@class GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly;
+@class GTLRVision_GoogleCloudVisionV1p3beta1ColorInfo;
+@class GTLRVision_GoogleCloudVisionV1p3beta1CropHint;
+@class GTLRVision_GoogleCloudVisionV1p3beta1CropHintsAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p3beta1DominantColorsAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p3beta1EntityAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark;
+@class GTLRVision_GoogleCloudVisionV1p3beta1GcsDestination;
+@class GTLRVision_GoogleCloudVisionV1p3beta1GcsSource;
+@class GTLRVision_GoogleCloudVisionV1p3beta1ImageAnnotationContext;
+@class GTLRVision_GoogleCloudVisionV1p3beta1ImageProperties;
+@class GTLRVision_GoogleCloudVisionV1p3beta1InputConfig;
+@class GTLRVision_GoogleCloudVisionV1p3beta1LocalizedObjectAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p3beta1LocationInfo;
+@class GTLRVision_GoogleCloudVisionV1p3beta1NormalizedVertex;
+@class GTLRVision_GoogleCloudVisionV1p3beta1OutputConfig;
+@class GTLRVision_GoogleCloudVisionV1p3beta1Page;
+@class GTLRVision_GoogleCloudVisionV1p3beta1Paragraph;
+@class GTLRVision_GoogleCloudVisionV1p3beta1Position;
+@class GTLRVision_GoogleCloudVisionV1p3beta1Product;
+@class GTLRVision_GoogleCloudVisionV1p3beta1ProductKeyValue;
+@class GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResults;
+@class GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResultsGroupedResult;
+@class GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResultsResult;
+@class GTLRVision_GoogleCloudVisionV1p3beta1Property;
+@class GTLRVision_GoogleCloudVisionV1p3beta1ReferenceImage;
+@class GTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p3beta1Symbol;
+@class GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotation;
+@class GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak;
+@class GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedLanguage;
+@class GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationTextProperty;
+@class GTLRVision_GoogleCloudVisionV1p3beta1Vertex;
+@class GTLRVision_GoogleCloudVisionV1p3beta1WebDetection;
+@class GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebEntity;
+@class GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebImage;
+@class GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebLabel;
+@class GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebPage;
+@class GTLRVision_GoogleCloudVisionV1p3beta1Word;
+@class GTLRVision_GroupedResult;
+@class GTLRVision_Image;
+@class GTLRVision_ImageAnnotationContext;
+@class GTLRVision_ImageContext;
+@class GTLRVision_ImageProperties;
+@class GTLRVision_ImageSource;
+@class GTLRVision_ImportProductSetsGcsSource;
+@class GTLRVision_ImportProductSetsInputConfig;
+@class GTLRVision_InputConfig;
+@class GTLRVision_KeyValue;
+@class GTLRVision_Landmark;
+@class GTLRVision_LatLng;
+@class GTLRVision_LatLongRect;
+@class GTLRVision_LocalizedObjectAnnotation;
+@class GTLRVision_LocationInfo;
+@class GTLRVision_NormalizedVertex;
+@class GTLRVision_Operation;
+@class GTLRVision_Operation_Metadata;
+@class GTLRVision_Operation_Response;
+@class GTLRVision_OutputConfig;
+@class GTLRVision_Page;
+@class GTLRVision_Paragraph;
+@class GTLRVision_Position;
+@class GTLRVision_Product;
+@class GTLRVision_ProductSearchParams;
+@class GTLRVision_ProductSearchResults;
+@class GTLRVision_ProductSet;
+@class GTLRVision_Property;
+@class GTLRVision_ReferenceImage;
+@class GTLRVision_Result;
+@class GTLRVision_SafeSearchAnnotation;
+@class GTLRVision_Status;
+@class GTLRVision_Status_Details_Item;
+@class GTLRVision_Symbol;
+@class GTLRVision_TextAnnotation;
+@class GTLRVision_TextProperty;
+@class GTLRVision_Vertex;
+@class GTLRVision_WebDetection;
+@class GTLRVision_WebDetectionParams;
+@class GTLRVision_WebEntity;
+@class GTLRVision_WebImage;
+@class GTLRVision_WebLabel;
+@class GTLRVision_WebPage;
+@class GTLRVision_Word;
+
+// Generated comments include content from the discovery document; avoid them
+// causing warnings since clang's checks are somewhat arbitrary.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdocumentation"
+
+NS_ASSUME_NONNULL_BEGIN
+
+// ----------------------------------------------------------------------------
+// Constants - For some of the classes' properties below.
+
+// ----------------------------------------------------------------------------
+// GTLRVision_BatchOperationMetadata.state
+
+/**
+ * The request is done after the longrunning.Operations.CancelOperation has
+ * been called by the user. Any records that were processed before the
+ * cancel command are output as specified in the request.
+ *
+ * Value: "CANCELLED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_BatchOperationMetadata_State_Cancelled;
+/**
+ * The request is done and no item has been successfully processed.
+ *
+ * Value: "FAILED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_BatchOperationMetadata_State_Failed;
+/**
+ * Request is actively being processed.
+ *
+ * Value: "PROCESSING"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_BatchOperationMetadata_State_Processing;
+/**
+ * Invalid.
+ *
+ * Value: "STATE_UNSPECIFIED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_BatchOperationMetadata_State_StateUnspecified;
+/**
+ * The request is done and at least one item has been successfully
+ * processed.
+ *
+ * Value: "SUCCESSFUL"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_BatchOperationMetadata_State_Successful;
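
The state values above are plain NSString constants, so progress checks reduce to string comparison. A small sketch, assuming `metadata` is a GTLRVision_BatchOperationMetadata decoded from a long-running operation's response:

    BOOL isTerminal =
        [metadata.state isEqual:kGTLRVision_BatchOperationMetadata_State_Successful] ||
        [metadata.state isEqual:kGTLRVision_BatchOperationMetadata_State_Failed] ||
        [metadata.state isEqual:kGTLRVision_BatchOperationMetadata_State_Cancelled];
    // SUCCESSFUL, FAILED, and CANCELLED are all terminal per the comments
    // above; PROCESSING means the batch is still running, so keep polling.
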
+
+// ----------------------------------------------------------------------------
+// GTLRVision_Block.blockType
+
+/**
+ * Barcode block.
+ *
+ * Value: "BARCODE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Block_BlockType_Barcode;
+/**
+ * Image block.
+ *
+ * Value: "PICTURE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Block_BlockType_Picture;
+/**
+ * Horizontal/vertical line box.
+ *
+ * Value: "RULER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Block_BlockType_Ruler;
+/**
+ * Table block.
+ *
+ * Value: "TABLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Block_BlockType_Table;
+/**
+ * Regular text block.
+ *
+ * Value: "TEXT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Block_BlockType_Text;
+/**
+ * Unknown block type.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Block_BlockType_Unknown;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_DetectedBreak.type
+
+/**
+ * Line-wrapping break.
+ *
+ * Value: "EOL_SURE_SPACE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_DetectedBreak_Type_EolSureSpace;
+/**
+ * End-line hyphen that is not present in text; does not co-occur with
+ * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ *
+ * Value: "HYPHEN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_DetectedBreak_Type_Hyphen;
+/**
+ * Line break that ends a paragraph.
+ *
+ * Value: "LINE_BREAK"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_DetectedBreak_Type_LineBreak;
+/**
+ * Regular space.
+ *
+ * Value: "SPACE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_DetectedBreak_Type_Space;
+/**
+ * Sure space (very wide).
+ *
+ * Value: "SURE_SPACE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_DetectedBreak_Type_SureSpace;
+/**
+ * Unknown break label type.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_DetectedBreak_Type_Unknown;
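
When reassembling plain text from the OCR word/symbol hierarchy, each detected break maps naturally onto a separator string. A minimal sketch built only on the constants above; the exact mapping policy is this sketch's assumption, not something the API prescribes:

    static NSString *SeparatorForDetectedBreak(NSString *breakType) {
      if ([breakType isEqual:kGTLRVision_DetectedBreak_Type_Space] ||
          [breakType isEqual:kGTLRVision_DetectedBreak_Type_SureSpace]) {
        return @" ";
      }
      if ([breakType isEqual:kGTLRVision_DetectedBreak_Type_EolSureSpace] ||
          [breakType isEqual:kGTLRVision_DetectedBreak_Type_LineBreak]) {
        return @"\n";
      }
      // HYPHEN marks an end-of-line hyphen that is not part of the text, and
      // UNKNOWN carries no spacing, so both contribute nothing when rejoining.
      return @"";
    }
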
+
+// ----------------------------------------------------------------------------
+// GTLRVision_FaceAnnotation.angerLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_AngerLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_AngerLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_AngerLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_AngerLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_AngerLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_AngerLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_FaceAnnotation.blurredLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_BlurredLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_BlurredLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_BlurredLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_BlurredLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_BlurredLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_BlurredLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_FaceAnnotation.headwearLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_HeadwearLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_HeadwearLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_HeadwearLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_HeadwearLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_HeadwearLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_HeadwearLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_FaceAnnotation.joyLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_JoyLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_JoyLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_JoyLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_JoyLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_JoyLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_JoyLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_FaceAnnotation.sorrowLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SorrowLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SorrowLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SorrowLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SorrowLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SorrowLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SorrowLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_FaceAnnotation.surpriseLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SurpriseLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SurpriseLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SurpriseLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SurpriseLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SurpriseLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_SurpriseLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_FaceAnnotation.underExposedLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_UnderExposedLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_UnderExposedLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_UnderExposedLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_UnderExposedLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_UnderExposedLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_FaceAnnotation_UnderExposedLikelihood_VeryUnlikely;
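
Every likelihood enum in this header shares the same six raw values, so a single ordinal helper covers all of them when thresholding. A sketch; the numeric ordering is this example's choice, not an API contract:

    static NSInteger GTLRLikelihoodRank(NSString *likelihood) {
      NSArray<NSString *> *ordered = @[
        kGTLRVision_FaceAnnotation_JoyLikelihood_VeryUnlikely,  // "VERY_UNLIKELY"
        kGTLRVision_FaceAnnotation_JoyLikelihood_Unlikely,      // "UNLIKELY"
        kGTLRVision_FaceAnnotation_JoyLikelihood_Possible,      // "POSSIBLE"
        kGTLRVision_FaceAnnotation_JoyLikelihood_Likely,        // "LIKELY"
        kGTLRVision_FaceAnnotation_JoyLikelihood_VeryLikely,    // "VERY_LIKELY"
      ];
      NSUInteger index = [ordered indexOfObject:likelihood];  // isEqual: match
      return index == NSNotFound ? -1 : (NSInteger)index;     // UNKNOWN -> -1
    }

A face can then be treated as, say, smiling when GTLRLikelihoodRank(face.joyLikelihood) >= 3, meaning LIKELY or stronger.
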
+
+// ----------------------------------------------------------------------------
+// GTLRVision_Feature.type
+
+/**
+ * Run crop hints.
+ *
+ * Value: "CROP_HINTS"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_CropHints;
+/**
+ * Run dense text document OCR. Takes precedence when both
+ * `DOCUMENT_TEXT_DETECTION` and `TEXT_DETECTION` are present.
+ *
+ * Value: "DOCUMENT_TEXT_DETECTION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_DocumentTextDetection;
+/**
+ * Run face detection.
+ *
+ * Value: "FACE_DETECTION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_FaceDetection;
+/**
+ * Compute a set of image properties, such as the
+ * image's dominant colors.
+ *
+ * Value: "IMAGE_PROPERTIES"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_ImageProperties;
+/**
+ * Run label detection.
+ *
+ * Value: "LABEL_DETECTION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_LabelDetection;
+/**
+ * Run landmark detection.
+ *
+ * Value: "LANDMARK_DETECTION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_LandmarkDetection;
+/**
+ * Run logo detection.
+ *
+ * Value: "LOGO_DETECTION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_LogoDetection;
+/**
+ * Run localizer for object detection.
+ *
+ * Value: "OBJECT_LOCALIZATION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_ObjectLocalization;
+/**
+ * Run Product Search.
+ *
+ * Value: "PRODUCT_SEARCH"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_ProductSearch;
+/**
+ * Run Safe Search to detect potentially unsafe
+ * or undesirable content.
+ *
+ * Value: "SAFE_SEARCH_DETECTION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_SafeSearchDetection;
+/**
+ * Run text detection / optical character recognition (OCR). Text detection
+ * is optimized for areas of text within a larger image; if the image is
+ * a document, use `DOCUMENT_TEXT_DETECTION` instead.
+ *
+ * Value: "TEXT_DETECTION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_TextDetection;
+/**
+ * Unspecified feature type.
+ *
+ * Value: "TYPE_UNSPECIFIED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_TypeUnspecified;
+/**
+ * Run web detection.
+ *
+ * Value: "WEB_DETECTION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_Feature_Type_WebDetection;
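
Per the comments above, DOCUMENT_TEXT_DETECTION takes precedence over TEXT_DETECTION when both are requested, so a request normally picks one of the two. A sketch of assembling a feature list, assuming `request` is a GTLRVision_AnnotateImageRequest built as in the earlier example:

    GTLRVision_Feature *labels = [GTLRVision_Feature object];
    labels.type = kGTLRVision_Feature_Type_LabelDetection;
    labels.maxResults = @10;  // NSNumber-wrapped int capping returned labels

    GTLRVision_Feature *ocr = [GTLRVision_Feature object];
    ocr.type = kGTLRVision_Feature_Type_DocumentTextDetection;  // dense text/scans

    request.features = @[ labels, ocr ];
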
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1Block.blockType
+
+/**
+ * Barcode block.
+ *
+ * Value: "BARCODE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Barcode;
+/**
+ * Image block.
+ *
+ * Value: "PICTURE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Picture;
+/**
+ * Horizontal/vertical line box.
+ *
+ * Value: "RULER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Ruler;
+/**
+ * Table block.
+ *
+ * Value: "TABLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Table;
+/**
+ * Regular text block.
+ *
+ * Value: "TEXT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Text;
+/**
+ * Unknown block type.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Unknown;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation.angerLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation.blurredLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation.headwearLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation.joyLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation.sorrowLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation.surpriseLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation.underExposedLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark.type
+
+/**
+ * Chin gnathion.
+ *
+ * Value: "CHIN_GNATHION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_ChinGnathion;
+/**
+ * Chin left gonion.
+ *
+ * Value: "CHIN_LEFT_GONION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_ChinLeftGonion;
+/**
+ * Chin right gonion.
+ *
+ * Value: "CHIN_RIGHT_GONION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_ChinRightGonion;
+/**
+ * Forehead glabella.
+ *
+ * Value: "FOREHEAD_GLABELLA"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_ForeheadGlabella;
+/**
+ * Left ear tragion.
+ *
+ * Value: "LEFT_EAR_TRAGION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEarTragion;
+/**
+ * Left eye.
+ *
+ * Value: "LEFT_EYE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEye;
+/**
+ * Left eye, bottom boundary.
+ *
+ * Value: "LEFT_EYE_BOTTOM_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyeBottomBoundary;
+/**
+ * Left eyebrow, upper midpoint.
+ *
+ * Value: "LEFT_EYEBROW_UPPER_MIDPOINT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyebrowUpperMidpoint;
+/**
+ * Left eye, left corner.
+ *
+ * Value: "LEFT_EYE_LEFT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyeLeftCorner;
+/**
+ * Left eye pupil.
+ *
+ * Value: "LEFT_EYE_PUPIL"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyePupil;
+/**
+ * Left eye, right corner.
+ *
+ * Value: "LEFT_EYE_RIGHT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyeRightCorner;
+/**
+ * Left eye, top boundary.
+ *
+ * Value: "LEFT_EYE_TOP_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyeTopBoundary;
+/**
+ * Left of left eyebrow.
+ *
+ * Value: "LEFT_OF_LEFT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftOfLeftEyebrow;
+/**
+ * Left of right eyebrow.
+ *
+ * Value: "LEFT_OF_RIGHT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftOfRightEyebrow;
+/**
+ * Lower lip.
+ *
+ * Value: "LOWER_LIP"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LowerLip;
+/**
+ * Midpoint between eyes.
+ *
+ * Value: "MIDPOINT_BETWEEN_EYES"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_MidpointBetweenEyes;
+/**
+ * Mouth center.
+ *
+ * Value: "MOUTH_CENTER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_MouthCenter;
+/**
+ * Mouth left.
+ *
+ * Value: "MOUTH_LEFT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_MouthLeft;
+/**
+ * Mouth right.
+ *
+ * Value: "MOUTH_RIGHT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_MouthRight;
+/**
+ * Nose, bottom center.
+ *
+ * Value: "NOSE_BOTTOM_CENTER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_NoseBottomCenter;
+/**
+ * Nose, bottom left.
+ *
+ * Value: "NOSE_BOTTOM_LEFT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_NoseBottomLeft;
+/**
+ * Nose, bottom right.
+ *
+ * Value: "NOSE_BOTTOM_RIGHT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_NoseBottomRight;
+/**
+ * Nose tip.
+ *
+ * Value: "NOSE_TIP"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_NoseTip;
+/**
+ * Right ear tragion.
+ *
+ * Value: "RIGHT_EAR_TRAGION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEarTragion;
+/**
+ * Right eye.
+ *
+ * Value: "RIGHT_EYE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEye;
+/**
+ * Right eye, bottom boundary.
+ *
+ * Value: "RIGHT_EYE_BOTTOM_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyeBottomBoundary;
+/**
+ * Right eyebrow, upper midpoint.
+ *
+ * Value: "RIGHT_EYEBROW_UPPER_MIDPOINT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyebrowUpperMidpoint;
+/**
+ * Right eye, left corner.
+ *
+ * Value: "RIGHT_EYE_LEFT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyeLeftCorner;
+/**
+ * Right eye pupil.
+ *
+ * Value: "RIGHT_EYE_PUPIL"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyePupil;
+/**
+ * Right eye, right corner.
+ *
+ * Value: "RIGHT_EYE_RIGHT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyeRightCorner;
+/**
+ * Right eye, top boundary.
+ *
+ * Value: "RIGHT_EYE_TOP_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyeTopBoundary;
+/**
+ * Right of left eyebrow.
+ *
+ * Value: "RIGHT_OF_LEFT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightOfLeftEyebrow;
+/**
+ * Right of right eyebrow.
+ *
+ * Value: "RIGHT_OF_RIGHT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightOfRightEyebrow;
+/**
+ * Unknown face landmark detected. Should not be filled.
+ *
+ * Value: "UNKNOWN_LANDMARK"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_UnknownLandmark;
+/**
+ * Upper lip.
+ *
+ * Value: "UPPER_LIP"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_UpperLip;
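
Landmark types are strings as well, so fetching one point from a face annotation is a linear scan. A sketch, assuming `face` is a GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation whose `landmarks` array and per-landmark `type`/`position` properties are declared later in this header:

    GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark *noseTip = nil;
    for (GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark *landmark
         in face.landmarks) {
      if ([landmark.type isEqual:
              kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_NoseTip]) {
        noseTip = landmark;  // landmark.position carries the x/y/z coordinates
        break;
      }
    }
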
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata.state
+
+/**
+ * The batch processing was cancelled.
+ *
+ * Value: "CANCELLED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_Cancelled;
+/**
+ * Request is received.
+ *
+ * Value: "CREATED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_Created;
+/**
+ * The batch processing is done.
+ *
+ * Value: "DONE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_Done;
+/**
+ * Request is actively being processed.
+ *
+ * Value: "RUNNING"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_Running;
+/**
+ * Invalid.
+ *
+ * Value: "STATE_UNSPECIFIED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_StateUnspecified;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation.adult
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation.medical
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation.racy
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation.spoof
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation.violence
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_VeryUnlikely;
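+
+// Usage note (illustrative sketch, not part of the generated surface): each
+// SafeSearch vertical is exposed as an NSString property holding one of the
+// likelihood constants above, so callers compare with -isEqual:. The
+// annotation here is assumed to come from a previously obtained response.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation *safeSearch = response.safeSearchAnnotation;
+//   BOOL likelyRacy =
+//       [safeSearch.racy isEqual:kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_Likely] ||
+//       [safeSearch.racy isEqual:kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_VeryLikely];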
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak.type
+
+/**
+ * Line-wrapping break.
+ *
+ * Value: "EOL_SURE_SPACE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_EolSureSpace;
+/**
+ * End-line hyphen that is not present in text; does not co-occur with
+ * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ *
+ * Value: "HYPHEN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_Hyphen;
+/**
+ * Line break that ends a paragraph.
+ *
+ * Value: "LINE_BREAK"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_LineBreak;
+/**
+ * Regular space.
+ *
+ * Value: "SPACE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_Space;
+/**
+ * Sure space (very wide).
+ *
+ * Value: "SURE_SPACE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_SureSpace;
+/**
+ * Unknown break label type.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_Unknown;
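+
+// Usage note (illustrative sketch; the helper name is hypothetical): when
+// rebuilding plain text from OCR output, the detected break's `type` string
+// chooses the separator to append after a word.
+//
+//   static NSString *SeparatorForBreak(NSString *breakType) {
+//     if ([breakType isEqual:kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_LineBreak] ||
+//         [breakType isEqual:kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_EolSureSpace]) {
+//       return @"\n";
+//     }
+//     if ([breakType isEqual:kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_Hyphen]) {
+//       return @"-\n";  // the hyphen is not present in the recognized text itself
+//     }
+//     return @" ";  // SPACE / SURE_SPACE / UNKNOWN
+//   }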
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1Block.blockType
+
+/**
+ * Barcode block.
+ *
+ * Value: "BARCODE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Barcode;
+/**
+ * Image block.
+ *
+ * Value: "PICTURE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Picture;
+/**
+ * Horizontal/vertical line box.
+ *
+ * Value: "RULER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Ruler;
+/**
+ * Table block.
+ *
+ * Value: "TABLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Table;
+/**
+ * Regular text block.
+ *
+ * Value: "TEXT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Text;
+/**
+ * Unknown block type.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Unknown;
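+
+// Usage note (illustrative sketch; `page.blocks` is assumed to be the block
+// array of the corresponding page type): keep only the blocks that OCR
+// classified as regular text.
+//
+//   NSMutableArray *textBlocks = [NSMutableArray array];
+//   for (GTLRVision_GoogleCloudVisionV1p2beta1Block *block in page.blocks) {
+//     if ([block.blockType isEqual:kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Text]) {
+//       [textBlocks addObject:block];
+//     }
+//   }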
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation.angerLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation.blurredLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation.headwearLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation.joyLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_VeryUnlikely;
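+
+// Usage note (illustrative sketch; `faces` is assumed to be an array of face
+// annotations obtained earlier): the emotion likelihoods are plain NSString
+// values, so a "smiling" check reduces to comparison against the joy
+// constants.
+//
+//   GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation *face = faces.firstObject;
+//   BOOL isSmiling =
+//       [face.joyLikelihood isEqual:kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_Likely] ||
+//       [face.joyLikelihood isEqual:kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_VeryLikely];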
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation.sorrowLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation.surpriseLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation.underExposedLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_VeryUnlikely;
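+
+// Usage note (illustrative sketch; `face` is assumed as above):
+// blurredLikelihood and underExposedLikelihood describe capture quality
+// rather than expression, so they are often combined into a single
+// "usable photo" gate before further processing.
+//
+//   BOOL badCapture =
+//       [face.blurredLikelihood isEqual:kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_VeryLikely] ||
+//       [face.underExposedLikelihood isEqual:kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_VeryLikely];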
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark.type
+
+/**
+ * Chin gnathion.
+ *
+ * Value: "CHIN_GNATHION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_ChinGnathion;
+/**
+ * Chin left gonion.
+ *
+ * Value: "CHIN_LEFT_GONION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_ChinLeftGonion;
+/**
+ * Chin right gonion.
+ *
+ * Value: "CHIN_RIGHT_GONION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_ChinRightGonion;
+/**
+ * Forehead glabella.
+ *
+ * Value: "FOREHEAD_GLABELLA"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_ForeheadGlabella;
+/**
+ * Left ear tragion.
+ *
+ * Value: "LEFT_EAR_TRAGION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEarTragion;
+/**
+ * Left eye.
+ *
+ * Value: "LEFT_EYE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEye;
+/**
+ * Left eye, bottom boundary.
+ *
+ * Value: "LEFT_EYE_BOTTOM_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyeBottomBoundary;
+/**
+ * Left eyebrow, upper midpoint.
+ *
+ * Value: "LEFT_EYEBROW_UPPER_MIDPOINT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyebrowUpperMidpoint;
+/**
+ * Left eye, left corner.
+ *
+ * Value: "LEFT_EYE_LEFT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyeLeftCorner;
+/**
+ * Left eye pupil.
+ *
+ * Value: "LEFT_EYE_PUPIL"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyePupil;
+/**
+ * Left eye, right corner.
+ *
+ * Value: "LEFT_EYE_RIGHT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyeRightCorner;
+/**
+ * Left eye, top boundary.
+ *
+ * Value: "LEFT_EYE_TOP_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyeTopBoundary;
+/**
+ * Left of left eyebrow.
+ *
+ * Value: "LEFT_OF_LEFT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftOfLeftEyebrow;
+/**
+ * Left of right eyebrow.
+ *
+ * Value: "LEFT_OF_RIGHT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftOfRightEyebrow;
+/**
+ * Lower lip.
+ *
+ * Value: "LOWER_LIP"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LowerLip;
+/**
+ * Midpoint between eyes.
+ *
+ * Value: "MIDPOINT_BETWEEN_EYES"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_MidpointBetweenEyes;
+/**
+ * Mouth center.
+ *
+ * Value: "MOUTH_CENTER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_MouthCenter;
+/**
+ * Mouth left.
+ *
+ * Value: "MOUTH_LEFT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_MouthLeft;
+/**
+ * Mouth right.
+ *
+ * Value: "MOUTH_RIGHT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_MouthRight;
+/**
+ * Nose, bottom center.
+ *
+ * Value: "NOSE_BOTTOM_CENTER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_NoseBottomCenter;
+/**
+ * Nose, bottom left.
+ *
+ * Value: "NOSE_BOTTOM_LEFT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_NoseBottomLeft;
+/**
+ * Nose, bottom right.
+ *
+ * Value: "NOSE_BOTTOM_RIGHT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_NoseBottomRight;
+/**
+ * Nose tip.
+ *
+ * Value: "NOSE_TIP"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_NoseTip;
+/**
+ * Right ear tragion.
+ *
+ * Value: "RIGHT_EAR_TRAGION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEarTragion;
+/**
+ * Right eye.
+ *
+ * Value: "RIGHT_EYE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEye;
+/**
+ * Right eye, bottom boundary.
+ *
+ * Value: "RIGHT_EYE_BOTTOM_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyeBottomBoundary;
+/**
+ * Right eyebrow, upper midpoint.
+ *
+ * Value: "RIGHT_EYEBROW_UPPER_MIDPOINT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyebrowUpperMidpoint;
+/**
+ * Right eye, left corner.
+ *
+ * Value: "RIGHT_EYE_LEFT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyeLeftCorner;
+/**
+ * Right eye pupil.
+ *
+ * Value: "RIGHT_EYE_PUPIL"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyePupil;
+/**
+ * Right eye, right corner.
+ *
+ * Value: "RIGHT_EYE_RIGHT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyeRightCorner;
+/**
+ * Right eye, top boundary.
+ *
+ * Value: "RIGHT_EYE_TOP_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyeTopBoundary;
+/**
+ * Right of left eyebrow.
+ *
+ * Value: "RIGHT_OF_LEFT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightOfLeftEyebrow;
+/**
+ * Right of right eyebrow.
+ *
+ * Value: "RIGHT_OF_RIGHT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightOfRightEyebrow;
+/**
+ * Unknown face landmark detected. Should not be filled.
+ *
+ * Value: "UNKNOWN_LANDMARK"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_UnknownLandmark;
+/**
+ * Upper lip.
+ *
+ * Value: "UPPER_LIP"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_UpperLip;
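+
+// Usage note (illustrative sketch; the face's `landmarks` array is assumed
+// from the corresponding annotation type): locate one landmark by its type
+// constant, e.g. the nose tip.
+//
+//   GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark *noseTip = nil;
+//   for (GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark *landmark in face.landmarks) {
+//     if ([landmark.type isEqual:kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_NoseTip]) {
+//       noseTip = landmark;
+//       break;
+//     }
+//   }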
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata.state
+
+/**
+ * The batch processing was cancelled.
+ *
+ * Value: "CANCELLED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Cancelled;
+/**
+ * Request is received.
+ *
+ * Value: "CREATED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Created;
+/**
+ * The batch processing is done.
+ *
+ * Value: "DONE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Done;
+/**
+ * Request is actively being processed.
+ *
+ * Value: "RUNNING"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Running;
+/**
+ * Invalid.
+ *
+ * Value: "STATE_UNSPECIFIED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_StateUnspecified;
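+
+// Usage note (illustrative sketch; `metadata` is assumed to be the operation
+// metadata from a polling response): CREATED and RUNNING are transient, while
+// DONE and CANCELLED are terminal, so a poll loop keys off the `state` string.
+//
+//   BOOL finished =
+//       [metadata.state isEqual:kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Done] ||
+//       [metadata.state isEqual:kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Cancelled];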
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation.adult
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_VeryUnlikely;
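+
+// Usage note (illustrative sketch; the helper name is hypothetical): because
+// the likelihood buckets are ordered, it can be convenient to map them onto
+// integers once and compare numerically instead of listing constants.
+//
+//   static NSInteger AdultLikelihoodRank(NSString *value) {
+//     NSArray<NSString *> *ordered = @[
+//       kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_VeryUnlikely,
+//       kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Unlikely,
+//       kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Possible,
+//       kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Likely,
+//       kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_VeryLikely,
+//     ];
+//     NSUInteger index = [ordered indexOfObject:value];
+//     return (index == NSNotFound) ? -1 : (NSInteger)index;  // UNKNOWN -> -1
+//   }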
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation.medical
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation.racy
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation.spoof
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation.violence
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak.type
+
+/**
+ * Line-wrapping break.
+ *
+ * Value: "EOL_SURE_SPACE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_EolSureSpace;
+/**
+ * End-line hyphen that is not present in text; does not co-occur with
+ * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+ *
+ * Value: "HYPHEN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_Hyphen;
+/**
+ * Line break that ends a paragraph.
+ *
+ * Value: "LINE_BREAK"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_LineBreak;
+/**
+ * Regular space.
+ *
+ * Value: "SPACE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_Space;
+/**
+ * Sure space (very wide).
+ *
+ * Value: "SURE_SPACE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_SureSpace;
+/**
+ * Unknown break label type.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_Unknown;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata.state
+
+/**
+ * The request is done after the longrunning.Operations.CancelOperation has
+ * been called by the user. Any records that were processed before the
+ * cancel command are output as specified in the request.
+ *
+ * Value: "CANCELLED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_Cancelled;
+/**
+ * The request is done and no item has been successfully processed.
+ *
+ * Value: "FAILED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_Failed;
+/**
+ * Request is actively being processed.
+ *
+ * Value: "PROCESSING"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_Processing;
+/**
+ * Invalid.
+ *
+ * Value: "STATE_UNSPECIFIED"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_StateUnspecified;
+/**
+ * The request is done and at least one item has been successfully
+ * processed.
+ *
+ * Value: "SUCCESSFUL"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_Successful;
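+
+// Usage note (illustrative sketch; `batchMetadata` is assumed from a polling
+// response): for batch operations the terminal states are SUCCESSFUL,
+// FAILED, and CANCELLED; PROCESSING means keep polling.
+//
+//   BOOL terminal =
+//       ![batchMetadata.state isEqual:kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_Processing] &&
+//       ![batchMetadata.state isEqual:kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_StateUnspecified];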
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1Block.blockType
+
+/**
+ * Barcode block.
+ *
+ * Value: "BARCODE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Barcode;
+/**
+ * Image block.
+ *
+ * Value: "PICTURE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Picture;
+/**
+ * Horizontal/vertical line box.
+ *
+ * Value: "RULER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Ruler;
+/**
+ * Table block.
+ *
+ * Value: "TABLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Table;
+/**
+ * Regular text block.
+ *
+ * Value: "TEXT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Text;
+/**
+ * Unknown block type.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Unknown;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation.angerLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_VeryUnlikely;
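+
+// Usage note (illustrative sketch; `face` is assumed as above): UNKNOWN is
+// distinct from the graded buckets, so defensive code treats "no signal"
+// separately from "very unlikely" when reacting to the anger likelihood.
+//
+//   if ([face.angerLikelihood isEqual:kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_Unknown]) {
+//     // The detector produced no usable signal; do not treat this as calm.
+//   }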
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation.blurredLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation.headwearLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation.joyLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation.sorrowLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation.surpriseLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation.underExposedLikelihood
+
+/**
+ * It is likely that the image belongs to the specified vertical.
+ *
+ * Value: "LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_Likely;
+/**
+ * It is possible that the image belongs to the specified vertical.
+ *
+ * Value: "POSSIBLE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_Possible;
+/**
+ * Unknown likelihood.
+ *
+ * Value: "UNKNOWN"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_Unknown;
+/**
+ * It is unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_Unlikely;
+/**
+ * It is very likely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_LIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_VeryLikely;
+/**
+ * It is very unlikely that the image belongs to the specified vertical.
+ *
+ * Value: "VERY_UNLIKELY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_VeryUnlikely;
+
+// ----------------------------------------------------------------------------
+// GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark.type
+
+/**
+ * Chin gnathion.
+ *
+ * Value: "CHIN_GNATHION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_ChinGnathion;
+/**
+ * Chin left gonion.
+ *
+ * Value: "CHIN_LEFT_GONION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_ChinLeftGonion;
+/**
+ * Chin right gonion.
+ *
+ * Value: "CHIN_RIGHT_GONION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_ChinRightGonion;
+/**
+ * Forehead glabella.
+ *
+ * Value: "FOREHEAD_GLABELLA"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_ForeheadGlabella;
+/**
+ * Left ear tragion.
+ *
+ * Value: "LEFT_EAR_TRAGION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEarTragion;
+/**
+ * Left eye.
+ *
+ * Value: "LEFT_EYE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEye;
+/**
+ * Left eye, bottom boundary.
+ *
+ * Value: "LEFT_EYE_BOTTOM_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyeBottomBoundary;
+/**
+ * Left eyebrow, upper midpoint.
+ *
+ * Value: "LEFT_EYEBROW_UPPER_MIDPOINT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyebrowUpperMidpoint;
+/**
+ * Left eye, left corner.
+ *
+ * Value: "LEFT_EYE_LEFT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyeLeftCorner;
+/**
+ * Left eye pupil.
+ *
+ * Value: "LEFT_EYE_PUPIL"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyePupil;
+/**
+ * Left eye, right corner.
+ *
+ * Value: "LEFT_EYE_RIGHT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyeRightCorner;
+/**
+ * Left eye, top boundary.
+ *
+ * Value: "LEFT_EYE_TOP_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyeTopBoundary;
+/**
+ * Left of left eyebrow.
+ *
+ * Value: "LEFT_OF_LEFT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftOfLeftEyebrow;
+/**
+ * Left of right eyebrow.
+ *
+ * Value: "LEFT_OF_RIGHT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftOfRightEyebrow;
+/**
+ * Lower lip.
+ *
+ * Value: "LOWER_LIP"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LowerLip;
+/**
+ * Midpoint between eyes.
+ *
+ * Value: "MIDPOINT_BETWEEN_EYES"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_MidpointBetweenEyes;
+/**
+ * Mouth center.
+ *
+ * Value: "MOUTH_CENTER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_MouthCenter;
+/**
+ * Mouth left.
+ *
+ * Value: "MOUTH_LEFT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_MouthLeft;
+/**
+ * Mouth right.
+ *
+ * Value: "MOUTH_RIGHT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_MouthRight;
+/**
+ * Nose, bottom center.
+ *
+ * Value: "NOSE_BOTTOM_CENTER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_NoseBottomCenter;
+/**
+ * Nose, bottom left.
+ *
+ * Value: "NOSE_BOTTOM_LEFT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_NoseBottomLeft;
+/**
+ * Nose, bottom right.
+ *
+ * Value: "NOSE_BOTTOM_RIGHT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_NoseBottomRight;
+/**
+ * Nose tip.
+ *
+ * Value: "NOSE_TIP"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_NoseTip;
+/**
+ * Right ear tragion.
+ *
+ * Value: "RIGHT_EAR_TRAGION"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEarTragion;
+/**
+ * Right eye.
+ *
+ * Value: "RIGHT_EYE"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEye;
+/**
+ * Right eye, bottom boundary.
+ *
+ * Value: "RIGHT_EYE_BOTTOM_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyeBottomBoundary;
+/**
+ * Right eyebrow, upper midpoint.
+ *
+ * Value: "RIGHT_EYEBROW_UPPER_MIDPOINT"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyebrowUpperMidpoint;
+/**
+ * Right eye, left corner.
+ *
+ * Value: "RIGHT_EYE_LEFT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyeLeftCorner;
+/**
+ * Right eye pupil.
+ *
+ * Value: "RIGHT_EYE_PUPIL"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyePupil;
+/**
+ * Right eye, right corner.
+ *
+ * Value: "RIGHT_EYE_RIGHT_CORNER"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyeRightCorner;
+/**
+ * Right eye, top boundary.
+ *
+ * Value: "RIGHT_EYE_TOP_BOUNDARY"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyeTopBoundary;
+/**
+ * Right of left eyebrow.
+ *
+ * Value: "RIGHT_OF_LEFT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightOfLeftEyebrow;
+/**
+ * Right of right eyebrow.
+ *
+ * Value: "RIGHT_OF_RIGHT_EYEBROW"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightOfRightEyebrow;
+/**
+ * Unknown face landmark detected. Should not be filled.
+ *
+ * Value: "UNKNOWN_LANDMARK"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_UnknownLandmark;
+/**
+ * Upper lip.
+ *
+ * Value: "UPPER_LIP"
+ */
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_UpperLip;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata.state
|
|
|
+
|
|
|
+/**
|
|
|
+ * The batch processing was cancelled.
|
|
|
+ *
|
|
|
+ * Value: "CANCELLED"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_Cancelled;
|
|
|
+/**
|
|
|
+ * Request is received.
|
|
|
+ *
|
|
|
+ * Value: "CREATED"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_Created;
|
|
|
+/**
|
|
|
+ * The batch processing is done.
|
|
|
+ *
|
|
|
+ * Value: "DONE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_Done;
|
|
|
+/**
|
|
|
+ * Request is actively being processed.
|
|
|
+ *
|
|
|
+ * Value: "RUNNING"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_Running;
|
|
|
+/**
|
|
|
+ * Invalid.
|
|
|
+ *
|
|
|
+ * Value: "STATE_UNSPECIFIED"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_StateUnspecified;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation.adult
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation.medical
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation.racy
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation.spoof
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation.violence
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak.type
|
|
|
+
|
|
|
+/**
|
|
|
+ * Line-wrapping break.
|
|
|
+ *
|
|
|
+ * Value: "EOL_SURE_SPACE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_EolSureSpace;
|
|
|
+/**
|
|
|
+ * End-line hyphen that is not present in text; does not co-occur with
|
|
|
+ * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
|
|
|
+ *
|
|
|
+ * Value: "HYPHEN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_Hyphen;
|
|
|
+/**
|
|
|
+ * Line break that ends a paragraph.
|
|
|
+ *
|
|
|
+ * Value: "LINE_BREAK"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_LineBreak;
|
|
|
+/**
|
|
|
+ * Regular space.
|
|
|
+ *
|
|
|
+ * Value: "SPACE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_Space;
|
|
|
+/**
|
|
|
+ * Sure space (very wide).
|
|
|
+ *
|
|
|
+ * Value: "SURE_SPACE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_SureSpace;
|
|
|
+/**
|
|
|
+ * Unknown break label type.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_Unknown;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_Landmark.type
|
|
|
+
|
|
|
+/**
|
|
|
+ * Chin gnathion.
|
|
|
+ *
|
|
|
+ * Value: "CHIN_GNATHION"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_ChinGnathion;
|
|
|
+/**
|
|
|
+ * Chin left gonion.
|
|
|
+ *
|
|
|
+ * Value: "CHIN_LEFT_GONION"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_ChinLeftGonion;
|
|
|
+/**
|
|
|
+ * Chin right gonion.
|
|
|
+ *
|
|
|
+ * Value: "CHIN_RIGHT_GONION"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_ChinRightGonion;
|
|
|
+/**
|
|
|
+ * Forehead glabella.
|
|
|
+ *
|
|
|
+ * Value: "FOREHEAD_GLABELLA"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_ForeheadGlabella;
|
|
|
+/**
|
|
|
+ * Left ear tragion.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_EAR_TRAGION"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftEarTragion;
|
|
|
+/**
|
|
|
+ * Left eye.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_EYE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftEye;
|
|
|
+/**
|
|
|
+ * Left eye, bottom boundary.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_EYE_BOTTOM_BOUNDARY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftEyeBottomBoundary;
|
|
|
+/**
|
|
|
+ * Left eyebrow, upper midpoint.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_EYEBROW_UPPER_MIDPOINT"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftEyebrowUpperMidpoint;
|
|
|
+/**
|
|
|
+ * Left eye, left corner.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_EYE_LEFT_CORNER"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftEyeLeftCorner;
|
|
|
+/**
|
|
|
+ * Left eye pupil.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_EYE_PUPIL"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftEyePupil;
|
|
|
+/**
|
|
|
+ * Left eye, right corner.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_EYE_RIGHT_CORNER"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftEyeRightCorner;
|
|
|
+/**
|
|
|
+ * Left eye, top boundary.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_EYE_TOP_BOUNDARY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftEyeTopBoundary;
|
|
|
+/**
|
|
|
+ * Left of left eyebrow.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_OF_LEFT_EYEBROW"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftOfLeftEyebrow;
|
|
|
+/**
|
|
|
+ * Left of right eyebrow.
|
|
|
+ *
|
|
|
+ * Value: "LEFT_OF_RIGHT_EYEBROW"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LeftOfRightEyebrow;
|
|
|
+/**
|
|
|
+ * Lower lip.
|
|
|
+ *
|
|
|
+ * Value: "LOWER_LIP"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_LowerLip;
|
|
|
+/**
|
|
|
+ * Midpoint between eyes.
|
|
|
+ *
|
|
|
+ * Value: "MIDPOINT_BETWEEN_EYES"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_MidpointBetweenEyes;
|
|
|
+/**
|
|
|
+ * Mouth center.
|
|
|
+ *
|
|
|
+ * Value: "MOUTH_CENTER"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_MouthCenter;
|
|
|
+/**
|
|
|
+ * Mouth left.
|
|
|
+ *
|
|
|
+ * Value: "MOUTH_LEFT"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_MouthLeft;
|
|
|
+/**
|
|
|
+ * Mouth right.
|
|
|
+ *
|
|
|
+ * Value: "MOUTH_RIGHT"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_MouthRight;
|
|
|
+/**
|
|
|
+ * Nose, bottom center.
|
|
|
+ *
|
|
|
+ * Value: "NOSE_BOTTOM_CENTER"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_NoseBottomCenter;
|
|
|
+/**
|
|
|
+ * Nose, bottom left.
|
|
|
+ *
|
|
|
+ * Value: "NOSE_BOTTOM_LEFT"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_NoseBottomLeft;
|
|
|
+/**
|
|
|
+ * Nose, bottom right.
|
|
|
+ *
|
|
|
+ * Value: "NOSE_BOTTOM_RIGHT"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_NoseBottomRight;
|
|
|
+/**
|
|
|
+ * Nose tip.
|
|
|
+ *
|
|
|
+ * Value: "NOSE_TIP"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_NoseTip;
|
|
|
+/**
|
|
|
+ * Right ear tragion.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_EAR_TRAGION"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightEarTragion;
|
|
|
+/**
|
|
|
+ * Right eye.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_EYE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightEye;
|
|
|
+/**
|
|
|
+ * Right eye, bottom boundary.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_EYE_BOTTOM_BOUNDARY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightEyeBottomBoundary;
|
|
|
+/**
|
|
|
+ * Right eyebrow, upper midpoint.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_EYEBROW_UPPER_MIDPOINT"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightEyebrowUpperMidpoint;
|
|
|
+/**
|
|
|
+ * Right eye, left corner.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_EYE_LEFT_CORNER"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightEyeLeftCorner;
|
|
|
+/**
|
|
|
+ * Right eye pupil.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_EYE_PUPIL"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightEyePupil;
|
|
|
+/**
|
|
|
+ * Right eye, right corner.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_EYE_RIGHT_CORNER"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightEyeRightCorner;
|
|
|
+/**
|
|
|
+ * Right eye, top boundary.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_EYE_TOP_BOUNDARY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightEyeTopBoundary;
|
|
|
+/**
|
|
|
+ * Right of left eyebrow.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_OF_LEFT_EYEBROW"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightOfLeftEyebrow;
|
|
|
+/**
|
|
|
+ * Right of right eyebrow.
|
|
|
+ *
|
|
|
+ * Value: "RIGHT_OF_RIGHT_EYEBROW"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_RightOfRightEyebrow;
|
|
|
+/**
|
|
|
+ * Unknown face landmark detected. Should not be filled.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN_LANDMARK"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_UnknownLandmark;
|
|
|
+/**
|
|
|
+ * Upper lip.
|
|
|
+ *
|
|
|
+ * Value: "UPPER_LIP"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_Landmark_Type_UpperLip;
|
|
|
+
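+// A minimal usage sketch (not generated code): walking the detected landmarks
+// on a face annotation and matching them against the type constants above.
+// Assumes the `landmarks`, `type`, and `position` properties declared
+// elsewhere in this header, and a previously obtained `faceAnnotation`.
+//
+//   for (GTLRVision_Landmark *landmark in faceAnnotation.landmarks) {
+//     if ([landmark.type isEqual:kGTLRVision_Landmark_Type_NoseTip]) {
+//       GTLRVision_Position *p = landmark.position;
+//       NSLog(@"Nose tip at (%@, %@)", p.x, p.y);
+//     }
+//   }
+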
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_OperationMetadata.state
|
|
|
+
|
|
|
+/**
|
|
|
+ * The batch processing was cancelled.
|
|
|
+ *
|
|
|
+ * Value: "CANCELLED"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_OperationMetadata_State_Cancelled;
|
|
|
+/**
|
|
|
+ * Request is received.
|
|
|
+ *
|
|
|
+ * Value: "CREATED"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_OperationMetadata_State_Created;
|
|
|
+/**
|
|
|
+ * The batch processing is done.
|
|
|
+ *
|
|
|
+ * Value: "DONE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_OperationMetadata_State_Done;
|
|
|
+/**
|
|
|
+ * Request is actively being processed.
|
|
|
+ *
|
|
|
+ * Value: "RUNNING"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_OperationMetadata_State_Running;
|
|
|
+/**
|
|
|
+ * Invalid.
|
|
|
+ *
|
|
|
+ * Value: "STATE_UNSPECIFIED"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_OperationMetadata_State_StateUnspecified;
|
|
|
+
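+// A minimal usage sketch (not generated code): checking the state of a
+// long-running batch operation. The state is a plain string compared against
+// the constants above; `operationMetadata` is assumed to be a
+// GTLRVision_OperationMetadata taken from an Operation's `metadata` field.
+//
+//   NSString *state = operationMetadata.state;
+//   if ([state isEqual:kGTLRVision_OperationMetadata_State_Done]) {
+//     // Batch processing finished; fetch the results.
+//   } else if ([state isEqual:kGTLRVision_OperationMetadata_State_Cancelled]) {
+//     // The request was cancelled before completion.
+//   }
+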
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_SafeSearchAnnotation.adult
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Adult_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Adult_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Adult_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Adult_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Adult_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Adult_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_SafeSearchAnnotation.medical
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Medical_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Medical_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Medical_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Medical_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Medical_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Medical_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_SafeSearchAnnotation.racy
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Racy_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Racy_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Racy_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Racy_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Racy_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Racy_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_SafeSearchAnnotation.spoof
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Spoof_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Spoof_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Spoof_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Spoof_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Spoof_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Spoof_VeryUnlikely;
|
|
|
+
|
|
|
+// ----------------------------------------------------------------------------
|
|
|
+// GTLRVision_SafeSearchAnnotation.violence
|
|
|
+
|
|
|
+/**
|
|
|
+ * It is likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Violence_Likely;
|
|
|
+/**
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "POSSIBLE"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Violence_Possible;
|
|
|
+/**
|
|
|
+ * Unknown likelihood.
|
|
|
+ *
|
|
|
+ * Value: "UNKNOWN"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Violence_Unknown;
|
|
|
+/**
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Violence_Unlikely;
|
|
|
+/**
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_LIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Violence_VeryLikely;
|
|
|
+/**
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ *
|
|
|
+ * Value: "VERY_UNLIKELY"
|
|
|
+ */
|
|
|
+GTLR_EXTERN NSString * const kGTLRVision_SafeSearchAnnotation_Violence_VeryUnlikely;
|
|
|
+
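+// A minimal usage sketch (not generated code): the SafeSearch likelihoods are
+// plain strings (the same values across all verticals), so thresholding means
+// comparing against the constants above. `safeSearch` is assumed to be a
+// GTLRVision_SafeSearchAnnotation from a response; the helper is hypothetical.
+//
+//   static BOOL IsAtLeastLikely(NSString *likelihood) {
+//     return [likelihood isEqual:kGTLRVision_SafeSearchAnnotation_Adult_Likely] ||
+//            [likelihood isEqual:kGTLRVision_SafeSearchAnnotation_Adult_VeryLikely];
+//   }
+//
+//   BOOL shouldBlock = IsAtLeastLikely(safeSearch.adult) ||
+//                      IsAtLeastLikely(safeSearch.violence);
+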
|
|
|
+/**
|
|
|
+ * Request message for the `AddProductToProductSet` method.
|
|
|
+ */
|
|
|
+@interface GTLRVision_AddProductToProductSetRequest : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name for the Product to be added to this ProductSet.
|
|
|
+ * Format is:
|
|
|
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *product;
|
|
|
+
|
|
|
+@end
|
|
|
+
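+// A minimal usage sketch (not generated code): building the request with a
+// fully qualified Product resource name in the documented format. The
+// project, location, and product IDs below are placeholders.
+//
+//   GTLRVision_AddProductToProductSetRequest *request =
+//       [[GTLRVision_AddProductToProductSetRequest alloc] init];
+//   request.product =
+//       @"projects/my-project/locations/us-west1/products/my-product";
+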
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response to a single file annotation request. A file may contain one or more
|
|
|
+ * images, which individually have their own responses.
|
|
|
+ */
|
|
|
+@interface GTLRVision_AnnotateFileResponse : GTLRObject
|
|
|
+
|
|
|
+/** Information about the file for which this response is generated. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_InputConfig *inputConfig;
|
|
|
+
|
|
|
+/** Individual responses to images found within the file. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_AnnotateImageResponse *> *responses;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Request for performing Google Cloud Vision API tasks over a user-provided
|
|
|
+ * image, with user-requested features.
|
|
|
+ */
|
|
|
+@interface GTLRVision_AnnotateImageRequest : GTLRObject
|
|
|
+
|
|
|
+/** Requested features. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Feature *> *features;
|
|
|
+
|
|
|
+/** The image to be processed. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_Image *image;
|
|
|
+
|
|
|
+/** Additional context that may accompany the image. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_ImageContext *imageContext;
|
|
|
+
|
|
|
+@end
|
|
|
+
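+// A minimal usage sketch (not generated code): assembling a single
+// annotate-image request. Assumes the GTLRVision_Feature and GTLRVision_Image
+// classes declared elsewhere in this header (with `type`/`maxResults` and
+// `content` properties), the kGTLRVision_Feature_Type_LabelDetection constant,
+// and GTLREncodeWebSafeBase64() from GTLRBase64.h; `imageData` is a
+// placeholder NSData.
+//
+//   GTLRVision_Feature *labels = [[GTLRVision_Feature alloc] init];
+//   labels.type = kGTLRVision_Feature_Type_LabelDetection;
+//   labels.maxResults = @10;
+//
+//   GTLRVision_Image *image = [[GTLRVision_Image alloc] init];
+//   image.content = GTLREncodeWebSafeBase64(imageData);
+//
+//   GTLRVision_AnnotateImageRequest *request =
+//       [[GTLRVision_AnnotateImageRequest alloc] init];
+//   request.features = @[ labels ];
+//   request.image = image;
+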
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response to an image annotation request.
|
|
|
+ */
|
|
|
+@interface GTLRVision_AnnotateImageResponse : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * If present, contextual information that is needed to understand where this
|
|
|
+ * image comes from.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_ImageAnnotationContext *context;
|
|
|
+
|
|
|
+/** If present, crop hints have completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_CropHintsAnnotation *cropHintsAnnotation;
|
|
|
+
|
|
|
+/**
|
|
|
+ * If set, represents the error message for the operation.
|
|
|
+ * Note that filled-in image annotations are guaranteed to be
|
|
|
+ * correct, even when `error` is set.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_Status *error;
|
|
|
+
|
|
|
+/** If present, face detection has completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_FaceAnnotation *> *faceAnnotations;
|
|
|
+
|
|
|
+/**
|
|
|
+ * If present, text (OCR) detection or document (OCR) text detection has
|
|
|
+ * completed successfully.
|
|
|
+ * This annotation provides the structural hierarchy for the OCR detected
|
|
|
+ * text.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_TextAnnotation *fullTextAnnotation;
|
|
|
+
|
|
|
+/** If present, image properties were extracted successfully. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_ImageProperties *imagePropertiesAnnotation;
|
|
|
+
|
|
|
+/** If present, label detection has completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_EntityAnnotation *> *labelAnnotations;
|
|
|
+
|
|
|
+/** If present, landmark detection has completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_EntityAnnotation *> *landmarkAnnotations;
|
|
|
+
|
|
|
+/**
|
|
|
+ * If present, localized object detection has completed successfully.
|
|
|
+ * This will be sorted descending by confidence score.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_LocalizedObjectAnnotation *> *localizedObjectAnnotations;
|
|
|
+
|
|
|
+/** If present, logo detection has completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_EntityAnnotation *> *logoAnnotations;
|
|
|
+
|
|
|
+/** If present, product search has completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_ProductSearchResults *productSearchResults;
|
|
|
+
|
|
|
+/** If present, safe-search annotation has completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_SafeSearchAnnotation *safeSearchAnnotation;
|
|
|
+
|
|
|
+/** If present, text (OCR) detection has completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_EntityAnnotation *> *textAnnotations;
|
|
|
+
|
|
|
+/** If present, web detection has completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_WebDetection *webDetection;
|
|
|
+
|
|
|
+@end
|
|
|
+
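+// A minimal usage sketch (not generated code): reading a response defensively.
+// Per the field comments above, `error` may be set while the filled-in
+// annotations remain valid, so check it rather than assuming success.
+// `response` is assumed to be an object of this class.
+//
+//   if (response.error != nil) {
+//     NSLog(@"Annotation error: %@", response.error.message);
+//   }
+//   for (GTLRVision_EntityAnnotation *label in response.labelAnnotations) {
+//     NSLog(@"%@ (score %@)", label.descriptionProperty, label.score);
+//   }
+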
|
|
|
+
|
|
|
+/**
|
|
|
+ * An offline file annotation request.
|
|
|
+ */
|
|
|
+@interface GTLRVision_AsyncAnnotateFileRequest : GTLRObject
|
|
|
+
|
|
|
+/** Required. Requested features. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Feature *> *features;
|
|
|
+
|
|
|
+/** Additional context that may accompany the image(s) in the file. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_ImageContext *imageContext;
|
|
|
+
|
|
|
+/** Required. Information about the input file. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_InputConfig *inputConfig;
|
|
|
+
|
|
|
+/** Required. The desired output location and metadata (e.g. format). */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_OutputConfig *outputConfig;
|
|
|
+
|
|
|
+@end
|
|
|
+
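+// A minimal usage sketch (not generated code): configuring an offline PDF
+// annotation. Assumes the GTLRVision_InputConfig, GTLRVision_OutputConfig,
+// GTLRVision_GcsSource, and GTLRVision_GcsDestination classes declared
+// elsewhere in this header; the bucket paths are placeholders.
+//
+//   GTLRVision_GcsSource *source = [[GTLRVision_GcsSource alloc] init];
+//   source.uri = @"gs://my-bucket/input.pdf";
+//
+//   GTLRVision_InputConfig *input = [[GTLRVision_InputConfig alloc] init];
+//   input.gcsSource = source;
+//   input.mimeType = @"application/pdf";
+//
+//   GTLRVision_GcsDestination *dest = [[GTLRVision_GcsDestination alloc] init];
+//   dest.uri = @"gs://my-bucket/output/";
+//
+//   GTLRVision_OutputConfig *output = [[GTLRVision_OutputConfig alloc] init];
+//   output.gcsDestination = dest;
+//   output.batchSize = @20;
+//
+//   GTLRVision_AsyncAnnotateFileRequest *request =
+//       [[GTLRVision_AsyncAnnotateFileRequest alloc] init];
+//   request.inputConfig = input;
+//   request.outputConfig = output;
+//   request.features = @[ /* e.g. a DOCUMENT_TEXT_DETECTION feature */ ];
+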
|
|
|
+
|
|
|
+/**
|
|
|
+ * The response for a single offline file annotation request.
|
|
|
+ */
|
|
|
+@interface GTLRVision_AsyncAnnotateFileResponse : GTLRObject
|
|
|
+
|
|
|
+/** The output location and metadata from AsyncAnnotateFileRequest. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_OutputConfig *outputConfig;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Multiple async file annotation requests are batched into a single service
|
|
|
+ * call.
|
|
|
+ */
|
|
|
+@interface GTLRVision_AsyncBatchAnnotateFilesRequest : GTLRObject
|
|
|
+
|
|
|
+/** Individual async file annotation requests for this batch. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_AsyncAnnotateFileRequest *> *requests;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response to an async batch file annotation request.
|
|
|
+ */
|
|
|
+@interface GTLRVision_AsyncBatchAnnotateFilesResponse : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The list of file annotation responses, one for each request in
|
|
|
+ * AsyncBatchAnnotateFilesRequest.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_AsyncAnnotateFileResponse *> *responses;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Multiple image annotation requests are batched into a single service call.
|
|
|
+ */
|
|
|
+@interface GTLRVision_BatchAnnotateImagesRequest : GTLRObject
|
|
|
+
|
|
|
+/** Individual image annotation requests for this batch. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_AnnotateImageRequest *> *requests;
|
|
|
+
|
|
|
+@end
|
|
|
+
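+// A minimal usage sketch (not generated code): batching requests and executing
+// them through the companion query class. Assumes GTLRVisionQuery_ImagesAnnotate
+// and a configured GTLRVisionService (`service`) from the rest of this
+// library; `request` is a previously built GTLRVision_AnnotateImageRequest.
+//
+//   GTLRVision_BatchAnnotateImagesRequest *batch =
+//       [[GTLRVision_BatchAnnotateImagesRequest alloc] init];
+//   batch.requests = @[ request ];
+//
+//   GTLRVisionQuery_ImagesAnnotate *query =
+//       [GTLRVisionQuery_ImagesAnnotate queryWithObject:batch];
+//   [service executeQuery:query
+//       completionHandler:^(GTLRServiceTicket *ticket,
+//                           GTLRVision_BatchAnnotateImagesResponse *result,
+//                           NSError *error) {
+//         // result.responses pairs one-to-one with batch.requests.
+//       }];
+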
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response to a batch image annotation request.
|
|
|
+ */
|
|
|
+@interface GTLRVision_BatchAnnotateImagesResponse : GTLRObject
|
|
|
+
|
|
|
+/** Individual responses to image annotation requests within the batch. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_AnnotateImageResponse *> *responses;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Metadata for the batch operations, such as the current state.
|
|
|
+ * This is included in the `metadata` field of the `Operation` returned by the
|
|
|
+ * `GetOperation` call of the `google::longrunning::Operations` service.
|
|
|
+ */
|
|
|
+@interface GTLRVision_BatchOperationMetadata : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The time when the batch request is finished and
|
|
|
+ * google.longrunning.Operation.done is set to true.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRDateTime *endTime;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The current state of the batch operation.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_BatchOperationMetadata_State_Cancelled The request is
|
|
|
+ * done after the longrunning.Operations.CancelOperation has
|
|
|
+ * been called by the user. Any records that were processed before the
|
|
|
+ * cancel command are output as specified in the request. (Value:
|
|
|
+ * "CANCELLED")
|
|
|
+ * @arg @c kGTLRVision_BatchOperationMetadata_State_Failed The request is
|
|
|
+ * done and no item has been successfully processed. (Value: "FAILED")
|
|
|
+ * @arg @c kGTLRVision_BatchOperationMetadata_State_Processing Request is
|
|
|
+ * actively being processed. (Value: "PROCESSING")
|
|
|
+ * @arg @c kGTLRVision_BatchOperationMetadata_State_StateUnspecified Invalid.
|
|
|
+ * (Value: "STATE_UNSPECIFIED")
|
|
|
+ * @arg @c kGTLRVision_BatchOperationMetadata_State_Successful The request is
|
|
|
+ * done and at least one item has been successfully
|
|
|
+ * processed. (Value: "SUCCESSFUL")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *state;
|
|
|
+
|
|
|
+/** The time when the batch request was submitted to the server. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRDateTime *submitTime;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Logical element on the page.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Block : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected block type (text, image, etc.) for this block.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_Block_BlockType_Barcode Barcode block. (Value:
|
|
|
+ * "BARCODE")
|
|
|
+ * @arg @c kGTLRVision_Block_BlockType_Picture Image block. (Value:
|
|
|
+ * "PICTURE")
|
|
|
+ * @arg @c kGTLRVision_Block_BlockType_Ruler Horizontal/vertical line box.
|
|
|
+ * (Value: "RULER")
|
|
|
+ * @arg @c kGTLRVision_Block_BlockType_Table Table block. (Value: "TABLE")
|
|
|
+ * @arg @c kGTLRVision_Block_BlockType_Text Regular text block. (Value:
|
|
|
+ * "TEXT")
|
|
|
+ * @arg @c kGTLRVision_Block_BlockType_Unknown Unknown block type. (Value:
|
|
|
+ * "UNKNOWN")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *blockType;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The bounding box for the block.
|
|
|
+ * The vertices are in the order of top-left, top-right, bottom-right,
|
|
|
+ * bottom-left. When a rotation of the bounding box is detected, the rotation
|
|
|
+ * is represented as a rotation around the top-left corner, as defined when
|
|
|
+ * the text is read in the 'natural' orientation.
|
|
|
+ * For example:
|
|
|
+ * * when the text is horizontal it might look like:
|
|
|
+ * 0----1
|
|
|
+ * | |
|
|
|
+ * 3----2
|
|
|
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
|
|
|
+ * 2----3
|
|
|
+ * | |
|
|
|
+ * 1----0
|
|
|
+ * and the vertex order will still be (0, 1, 2, 3).
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingBox;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of the OCR results on the block. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/** List of paragraphs in this block (if this block is of type text). */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Paragraph *> *paragraphs;
|
|
|
+
|
|
|
+/** Additional information detected for the block. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_TextProperty *property;
|
|
|
+
|
|
|
+@end
|
|
|
+
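+// A minimal usage sketch (not generated code): descending the OCR hierarchy
+// from a block to its paragraphs via the `paragraphs` property above. Assumes
+// GTLRVision_Paragraph (declared elsewhere in this header) exposes `words`
+// and `confidence` properties, as it does in this library.
+//
+//   if ([block.blockType isEqual:kGTLRVision_Block_BlockType_Text]) {
+//     for (GTLRVision_Paragraph *paragraph in block.paragraphs) {
+//       NSLog(@"Paragraph with %lu words (confidence %@)",
+//             (unsigned long)paragraph.words.count, paragraph.confidence);
+//     }
+//   }
+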
|
|
|
+
|
|
|
+/**
|
|
|
+ * A bounding polygon for the detected image annotation.
|
|
|
+ */
|
|
|
+@interface GTLRVision_BoundingPoly : GTLRObject
|
|
|
+
|
|
|
+/** The bounding polygon normalized vertices. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_NormalizedVertex *> *normalizedVertices;
|
|
|
+
|
|
|
+/** The bounding polygon vertices. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Vertex *> *vertices;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The request message for Operations.CancelOperation.
|
|
|
+ */
|
|
|
+@interface GTLRVision_CancelOperationRequest : GTLRObject
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Represents a color in the RGBA color space. This representation is designed
|
|
|
+ * for simplicity of conversion to/from color representations in various
|
|
|
+ * languages over compactness; for example, the fields of this representation
|
|
|
+ * can be trivially provided to the constructor of "java.awt.Color" in Java; it
|
|
|
 * can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha:"
|
|
|
+ * method in iOS; and, with just a little work, it can be easily formatted into
|
|
|
+ * a CSS "rgba()" string in JavaScript, as well. Here are some examples:
|
|
|
+ * Example (Java):
|
|
|
+ * import com.google.type.Color;
|
|
|
+ * // ...
|
|
|
+ * public static java.awt.Color fromProto(Color protocolor) {
|
|
|
+ * float alpha = protocolor.hasAlpha()
|
|
|
+ * ? protocolor.getAlpha().getValue()
|
|
|
+ * : 1.0f;
|
|
|
+ * return new java.awt.Color(
|
|
|
+ * protocolor.getRed(),
|
|
|
+ * protocolor.getGreen(),
|
|
|
+ * protocolor.getBlue(),
|
|
|
+ * alpha);
|
|
|
+ * }
|
|
|
+ * public static Color toProto(java.awt.Color color) {
|
|
|
+ * float red = (float) color.getRed();
|
|
|
+ * float green = (float) color.getGreen();
|
|
|
+ * float blue = (float) color.getBlue();
|
|
|
+ * float denominator = 255.0f;
|
|
|
+ * Color.Builder resultBuilder =
|
|
|
+ * Color
|
|
|
+ * .newBuilder()
|
|
|
+ * .setRed(red / denominator)
|
|
|
+ * .setGreen(green / denominator)
|
|
|
+ * .setBlue(blue / denominator);
|
|
|
+ * int alpha = color.getAlpha();
|
|
|
+ * if (alpha != 255) {
|
|
|
+ * resultBuilder.setAlpha(
|
|
|
+ * FloatValue
|
|
|
+ * .newBuilder()
|
|
|
+ * .setValue(((float) alpha) / denominator)
|
|
|
+ * .build());
|
|
|
+ * }
|
|
|
+ * return resultBuilder.build();
|
|
|
+ * }
|
|
|
+ * // ...
|
|
|
+ * Example (iOS / Obj-C):
|
|
|
+ * // ...
|
|
|
+ * static UIColor* fromProto(Color* protocolor) {
|
|
|
+ * float red = [protocolor red];
|
|
|
+ * float green = [protocolor green];
|
|
|
+ * float blue = [protocolor blue];
|
|
|
+ * FloatValue* alpha_wrapper = [protocolor alpha];
|
|
|
+ * float alpha = 1.0;
|
|
|
+ * if (alpha_wrapper != nil) {
|
|
|
+ * alpha = [alpha_wrapper value];
|
|
|
+ * }
|
|
|
+ * return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
|
|
|
+ * }
|
|
|
+ * static Color* toProto(UIColor* color) {
|
|
|
+ * CGFloat red, green, blue, alpha;
|
|
|
+ * if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
|
|
|
+ * return nil;
|
|
|
+ * }
|
|
|
+ * Color* result = [[Color alloc] init];
|
|
|
+ * [result setRed:red];
|
|
|
+ * [result setGreen:green];
|
|
|
+ * [result setBlue:blue];
|
|
|
+ * if (alpha <= 0.9999) {
|
|
|
+ * [result setAlpha:floatWrapperWithValue(alpha)];
|
|
|
+ * }
|
|
|
+ * [result autorelease];
|
|
|
+ * return result;
|
|
|
+ * }
|
|
|
+ * // ...
|
|
|
+ * Example (JavaScript):
|
|
|
+ * // ...
|
|
|
+ * var protoToCssColor = function(rgb_color) {
|
|
|
+ * var redFrac = rgb_color.red || 0.0;
|
|
|
+ * var greenFrac = rgb_color.green || 0.0;
|
|
|
+ * var blueFrac = rgb_color.blue || 0.0;
|
|
|
+ * var red = Math.floor(redFrac * 255);
|
|
|
+ * var green = Math.floor(greenFrac * 255);
|
|
|
+ * var blue = Math.floor(blueFrac * 255);
|
|
|
+ * if (!('alpha' in rgb_color)) {
|
|
|
+ * return rgbToCssColor_(red, green, blue);
|
|
|
+ * }
|
|
|
+ * var alphaFrac = rgb_color.alpha.value || 0.0;
|
|
|
+ * var rgbParams = [red, green, blue].join(',');
|
|
|
+ * return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
|
|
|
+ * };
|
|
|
+ * var rgbToCssColor_ = function(red, green, blue) {
|
|
|
+ * var rgbNumber = new Number((red << 16) | (green << 8) | blue);
|
|
|
+ * var hexString = rgbNumber.toString(16);
|
|
|
+ * var missingZeros = 6 - hexString.length;
|
|
|
+ * var resultBuilder = ['#'];
|
|
|
+ * for (var i = 0; i < missingZeros; i++) {
|
|
|
+ * resultBuilder.push('0');
|
|
|
+ * }
|
|
|
+ * resultBuilder.push(hexString);
|
|
|
+ * return resultBuilder.join('');
|
|
|
+ * };
|
|
|
+ * // ...
|
|
|
+ */
|
|
|
+@interface GTLRVision_Color : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The fraction of this color that should be applied to the pixel. That is,
|
|
|
+ * the final pixel color is defined by the equation:
|
|
|
+ * pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
|
|
|
+ * This means that a value of 1.0 corresponds to a solid color, whereas
|
|
|
+ * a value of 0.0 corresponds to a completely transparent color. This
|
|
|
+ * uses a wrapper message rather than a simple float scalar so that it is
|
|
|
+ * possible to distinguish between a default value and the value being unset.
|
|
|
+ * If omitted, this color object is to be rendered as a solid color
|
|
|
+ * (as if the alpha value had been explicitly given with a value of 1.0).
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *alpha;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The amount of blue in the color as a value in the interval [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *blue;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The amount of green in the color as a value in the interval [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *green;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The amount of red in the color as a value in the interval [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *red;
|
|
|
+
|
|
|
+@end
|
|
|
+
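+// A minimal usage sketch (not generated code): mapping a GTLRVision_Color to
+// a UIColor using the alpha semantics documented above (a nil alpha wrapper
+// means fully opaque). The red/green/blue fractions are already in [0, 1].
+//
+//   static UIColor *UIColorFromGTLRColor(GTLRVision_Color *color) {
+//     CGFloat alpha = (color.alpha != nil) ? color.alpha.floatValue : 1.0;
+//     return [UIColor colorWithRed:color.red.floatValue
+//                            green:color.green.floatValue
+//                             blue:color.blue.floatValue
+//                            alpha:alpha];
+//   }
+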
|
|
|
+
|
|
|
+/**
|
|
|
+ * Color information consists of RGB channels, score, and the fraction of
|
|
|
+ * the image that the color occupies in the image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ColorInfo : GTLRObject
|
|
|
+
|
|
|
+/** RGB components of the color. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_Color *color;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The fraction of pixels the color occupies in the image.
|
|
|
+ * Value in range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *pixelFraction;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Image-specific score for this color. Value in range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *score;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Single crop hint that is used to generate a new crop when serving an image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_CropHint : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The bounding polygon for the crop region. The coordinates of the bounding
|
|
|
+ * box are in the original image's scale.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingPoly;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of this being a salient region. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Fraction of importance of this salient region with respect to the original
|
|
|
+ * image.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *importanceFraction;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Set of crop hints that are used to generate new crops when serving images.
|
|
|
+ */
|
|
|
+@interface GTLRVision_CropHintsAnnotation : GTLRObject
|
|
|
+
|
|
|
+/** Crop hint results. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_CropHint *> *cropHints;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Parameters for crop hints annotation request.
|
|
|
+ */
|
|
|
+@interface GTLRVision_CropHintsParams : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Aspect ratios in floats, representing the ratio of the width to the height
|
|
|
+ * of the image. For example, if the desired aspect ratio is 4/3, the
|
|
|
+ * corresponding float value should be 1.33333. If not specified, the
|
|
|
+ * best possible crop is returned. The number of provided aspect ratios is
|
|
|
+ * limited to a maximum of 16; any aspect ratios provided after the 16th are
|
|
|
+ * ignored.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<NSNumber *> *aspectRatios;
|
|
|
+
|
|
|
+@end
|
|
|
+
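+// A minimal usage sketch (not generated code): requesting crop hints for 4:3
+// and 16:9 crops, per the aspect-ratio encoding described above (width
+// divided by height, at most 16 values).
+//
+//   GTLRVision_CropHintsParams *params =
+//       [[GTLRVision_CropHintsParams alloc] init];
+//   params.aspectRatios = @[ @1.33333f, @1.77778f ];
+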
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected start or end of a structural component.
|
|
|
+ */
|
|
|
+@interface GTLRVision_DetectedBreak : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * True if break prepends the element.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of boolValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *isPrefix;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected break type.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_DetectedBreak_Type_EolSureSpace Line-wrapping break.
|
|
|
+ * (Value: "EOL_SURE_SPACE")
|
|
|
+ * @arg @c kGTLRVision_DetectedBreak_Type_Hyphen End-line hyphen that is not
|
|
|
+ * present in text; does not co-occur with
|
|
|
+ * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`. (Value: "HYPHEN")
|
|
|
+ * @arg @c kGTLRVision_DetectedBreak_Type_LineBreak Line break that ends a
|
|
|
+ * paragraph. (Value: "LINE_BREAK")
|
|
|
+ * @arg @c kGTLRVision_DetectedBreak_Type_Space Regular space. (Value:
|
|
|
+ * "SPACE")
|
|
|
+ * @arg @c kGTLRVision_DetectedBreak_Type_SureSpace Sure space (very wide).
|
|
|
+ * (Value: "SURE_SPACE")
|
|
|
+ * @arg @c kGTLRVision_DetectedBreak_Type_Unknown Unknown break label type.
|
|
|
+ * (Value: "UNKNOWN")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *type;
|
|
|
+
|
|
|
+@end
|
|
|
+
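+// A minimal usage sketch (not generated code): turning a detected break back
+// into a separator when reassembling OCR text. The mapping of break types to
+// characters here is an illustrative choice, not part of the API.
+//
+//   static NSString *SeparatorForBreak(GTLRVision_DetectedBreak *detectedBreak) {
+//     NSString *type = detectedBreak.type;
+//     if ([type isEqual:kGTLRVision_DetectedBreak_Type_LineBreak] ||
+//         [type isEqual:kGTLRVision_DetectedBreak_Type_EolSureSpace]) {
+//       return @"\n";
+//     }
+//     if ([type isEqual:kGTLRVision_DetectedBreak_Type_Hyphen]) {
+//       return @"";  // the hyphen is not present in the underlying text
+//     }
+//     return @" ";
+//   }
+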
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected language for a structural component.
|
|
|
+ */
|
|
|
+@interface GTLRVision_DetectedLanguage : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of detected language. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
|
|
|
+ * information, see
|
|
|
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *languageCode;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Set of dominant colors and their corresponding scores.
|
|
|
+ */
|
|
|
+@interface GTLRVision_DominantColorsAnnotation : GTLRObject
|
|
|
+
|
|
|
+/** RGB color values with their score and pixel fraction. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_ColorInfo *> *colors;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A generic empty message that you can re-use to avoid defining duplicated
|
|
|
+ * empty messages in your APIs. A typical example is to use it as the request
|
|
|
+ * or the response type of an API method. For instance:
|
|
|
+ * service Foo {
|
|
|
+ * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
|
|
+ * }
|
|
|
+ * The JSON representation for `Empty` is empty JSON object `{}`.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Empty : GTLRObject
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Set of detected entity features.
|
|
|
+ */
|
|
|
+@interface GTLRVision_EntityAnnotation : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Image region to which this entity belongs. Not produced
|
|
|
+ * for `LABEL_DETECTION` features.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingPoly;
|
|
|
+
|
|
|
+/**
|
|
|
+ * **Deprecated. Use `score` instead.**
|
|
|
+ * The accuracy of the entity detection in an image.
|
|
|
+ * For example, for an image in which the "Eiffel Tower" entity is detected,
|
|
|
+ * this field represents the confidence that there is a tower in the query
|
|
|
+ * image. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Entity textual description, expressed in its `locale` language.
|
|
|
+ *
|
|
|
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The language code for the locale in which the entity textual
|
|
|
+ * `description` is expressed.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *locale;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The location information for the detected entity. Multiple
|
|
|
+ * `LocationInfo` elements can be present because one location may
|
|
|
+ * indicate the location of the scene in the image, and another location
|
|
|
+ * may indicate the location of the place where the image was taken.
|
|
|
+ * Location information is usually present for landmarks.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_LocationInfo *> *locations;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Opaque entity ID. Some IDs may be available in
|
|
|
+ * [Google Knowledge Graph Search
|
|
|
+ * API](https://developers.google.com/knowledge-graph/).
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *mid;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Some entities may have optional user-supplied `Property` (name/value)
|
|
|
+ * fields, such as a score or string that qualifies the entity.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Property *> *properties;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Overall score of the result. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *score;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The relevancy of the ICA (Image Content Annotation) label to the
|
|
|
+ * image. For example, the relevancy of "tower" is likely higher to an image
|
|
|
+ * containing the detected "Eiffel Tower" than to an image containing a
|
|
|
+ * detected distant towering building, even though the confidence that
|
|
|
+ * there is a tower in each image may be the same. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *topicality;
|
|
|
+
|
|
|
+@end
|
|
|
+
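+// A minimal usage sketch (not generated code): filtering label annotations by
+// the non-deprecated `score` field, as the `confidence` comment above advises.
+// `response` is assumed to be a GTLRVision_AnnotateImageResponse; the 0.75f
+// threshold is an arbitrary example.
+//
+//   NSMutableArray<NSString *> *strongLabels = [NSMutableArray array];
+//   for (GTLRVision_EntityAnnotation *entity in response.labelAnnotations) {
+//     if (entity.score.floatValue >= 0.75f) {
+//       [strongLabels addObject:entity.descriptionProperty];
+//     }
+//   }
+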
|
|
|
+
|
|
|
+/**
|
|
|
+ * A face annotation object contains the results of face detection.
|
|
|
+ */
|
|
|
+@interface GTLRVision_FaceAnnotation : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Anger likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_AngerLikelihood_Likely It is likely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_AngerLikelihood_Possible It is possible
|
|
|
+ * that the image belongs to the specified vertical. (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_AngerLikelihood_Unknown Unknown
|
|
|
+ * likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_AngerLikelihood_Unlikely It is unlikely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_AngerLikelihood_VeryLikely It is very
|
|
|
+ * likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_AngerLikelihood_VeryUnlikely It is very
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *angerLikelihood;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Blurred likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_BlurredLikelihood_Likely It is likely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_BlurredLikelihood_Possible It is
|
|
|
+ * possible that the image belongs to the specified vertical. (Value:
|
|
|
+ * "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_BlurredLikelihood_Unknown Unknown
|
|
|
+ * likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_BlurredLikelihood_Unlikely It is
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_BlurredLikelihood_VeryLikely It is very
|
|
|
+ * likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_BlurredLikelihood_VeryUnlikely It is
|
|
|
+ * very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *blurredLikelihood;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The bounding polygon around the face. The coordinates of the bounding box
|
|
|
+ * are in the original image's scale.
|
|
|
+ * The bounding box is computed to "frame" the face in accordance with human
|
|
|
+ * expectations. It is based on the landmarker results.
|
|
|
+ * Note that one or more x and/or y coordinates may not be generated in the
|
|
|
+ * `BoundingPoly` (the polygon will be unbounded) if only a partial face
|
|
|
+ * appears in the image to be annotated.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingPoly;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detection confidence. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *detectionConfidence;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The `fd_bounding_poly` bounding polygon is tighter than the
|
|
|
+ * `boundingPoly`, and encloses only the skin part of the face. Typically, it
|
|
|
+ * is used to eliminate the face from any image analysis that detects the
|
|
|
+ * "amount of skin" visible in an image. It is not based on the
|
|
|
+ * landmarker results, only on the initial face detection, hence
|
|
|
+ * the <code>fd</code> (face detection) prefix.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *fdBoundingPoly;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Headwear likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_HeadwearLikelihood_Likely It is likely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_HeadwearLikelihood_Possible It is
|
|
|
+ * possible that the image belongs to the specified vertical. (Value:
|
|
|
+ * "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_HeadwearLikelihood_Unknown Unknown
|
|
|
+ * likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_HeadwearLikelihood_Unlikely It is
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_HeadwearLikelihood_VeryLikely It is
|
|
|
+ * very likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_HeadwearLikelihood_VeryUnlikely It is
|
|
|
+ * very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *headwearLikelihood;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Joy likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_JoyLikelihood_Likely It is likely that
|
|
|
+ * the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_JoyLikelihood_Possible It is possible
|
|
|
+ * that the image belongs to the specified vertical. (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_JoyLikelihood_Unknown Unknown
|
|
|
+ * likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_JoyLikelihood_Unlikely It is unlikely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_JoyLikelihood_VeryLikely It is very
|
|
|
+ * likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_JoyLikelihood_VeryUnlikely It is very
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *joyLikelihood;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Face landmarking confidence. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *landmarkingConfidence;
|
|
|
+
|
|
|
+/** Detected face landmarks. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Landmark *> *landmarks;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Yaw angle, which indicates the leftward/rightward angle that the face is
|
|
|
+ * pointing relative to the vertical plane perpendicular to the image. Range
|
|
|
+ * [-180,180].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *panAngle;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
|
|
|
+ * of the face relative to the image vertical about the axis perpendicular to
|
|
|
+ * the face. Range [-180,180].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *rollAngle;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Sorrow likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SorrowLikelihood_Likely It is likely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SorrowLikelihood_Possible It is
|
|
|
+ * possible that the image belongs to the specified vertical. (Value:
|
|
|
+ * "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SorrowLikelihood_Unknown Unknown
|
|
|
+ * likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SorrowLikelihood_Unlikely It is
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SorrowLikelihood_VeryLikely It is very
|
|
|
+ * likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SorrowLikelihood_VeryUnlikely It is
|
|
|
+ * very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *sorrowLikelihood;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Surprise likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SurpriseLikelihood_Likely It is likely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SurpriseLikelihood_Possible It is
|
|
|
+ * possible that the image belongs to the specified vertical. (Value:
|
|
|
+ * "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SurpriseLikelihood_Unknown Unknown
|
|
|
+ * likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SurpriseLikelihood_Unlikely It is
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SurpriseLikelihood_VeryLikely It is
|
|
|
+ * very likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_SurpriseLikelihood_VeryUnlikely It is
|
|
|
+ * very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *surpriseLikelihood;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Pitch angle, which indicates the upwards/downwards angle that the face is
|
|
|
+ * pointing relative to the image's horizontal plane. Range [-180,180].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *tiltAngle;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Under-exposed likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_UnderExposedLikelihood_Likely It is
|
|
|
+ * likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_UnderExposedLikelihood_Possible It is
|
|
|
+ * possible that the image belongs to the specified vertical. (Value:
|
|
|
+ * "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_UnderExposedLikelihood_Unknown Unknown
|
|
|
+ * likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_UnderExposedLikelihood_Unlikely It is
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_UnderExposedLikelihood_VeryLikely It is
|
|
|
+ * very likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_FaceAnnotation_UnderExposedLikelihood_VeryUnlikely It
|
|
|
+ * is very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *underExposedLikelihood;
|
|
|
+
|
|
|
+@end
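+
+// Editor's note (not generated): a minimal sketch of reading a
+// GTLRVision_FaceAnnotation. The `response` variable is a hypothetical
+// GTLRVision_AnnotateImageResponse obtained elsewhere; the loop uses only
+// properties and constants declared in this header.
+//
+//   for (GTLRVision_FaceAnnotation *face in response.faceAnnotations) {
+//     // Likelihoods are string enums; compare against the k... constants.
+//     if ([face.joyLikelihood isEqual:kGTLRVision_FaceAnnotation_JoyLikelihood_VeryLikely]) {
+//       // panAngle/tiltAngle/rollAngle are NSNumbers wrapping floats.
+//       NSLog(@"Joyful face (yaw %.1f deg, confidence %.2f)",
+//             face.panAngle.floatValue, face.detectionConfidence.floatValue);
+//     }
+//   }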
+
+
+/**
+ * The type of Google Cloud Vision API detection to perform, and the maximum
+ * number of results to return for that type. Multiple `Feature` objects can
+ * be specified in the `features` list.
+ */
+@interface GTLRVision_Feature : GTLRObject
+
+/**
+ * Maximum number of results of this type. Does not apply to
+ * `TEXT_DETECTION`, `DOCUMENT_TEXT_DETECTION`, or `CROP_HINTS`.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *maxResults;
+
+/**
+ * Model to use for the feature.
+ * Supported values: "builtin/stable" (the default if unset) and
+ * "builtin/latest".
+ */
+@property(nonatomic, copy, nullable) NSString *model;
+
+/**
+ * The feature type.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_Feature_Type_CropHints Run crop hints. (Value:
+ *     "CROP_HINTS")
+ * @arg @c kGTLRVision_Feature_Type_DocumentTextDetection Run dense text
+ *     document OCR. Takes precedence when both `DOCUMENT_TEXT_DETECTION` and
+ *     `TEXT_DETECTION` are present. (Value: "DOCUMENT_TEXT_DETECTION")
+ * @arg @c kGTLRVision_Feature_Type_FaceDetection Run face detection. (Value:
+ *     "FACE_DETECTION")
+ * @arg @c kGTLRVision_Feature_Type_ImageProperties Compute a set of image
+ *     properties, such as the image's dominant colors. (Value:
+ *     "IMAGE_PROPERTIES")
+ * @arg @c kGTLRVision_Feature_Type_LabelDetection Run label detection.
+ *     (Value: "LABEL_DETECTION")
+ * @arg @c kGTLRVision_Feature_Type_LandmarkDetection Run landmark detection.
+ *     (Value: "LANDMARK_DETECTION")
+ * @arg @c kGTLRVision_Feature_Type_LogoDetection Run logo detection. (Value:
+ *     "LOGO_DETECTION")
+ * @arg @c kGTLRVision_Feature_Type_ObjectLocalization Run localizer for
+ *     object detection. (Value: "OBJECT_LOCALIZATION")
+ * @arg @c kGTLRVision_Feature_Type_ProductSearch Run Product Search. (Value:
+ *     "PRODUCT_SEARCH")
+ * @arg @c kGTLRVision_Feature_Type_SafeSearchDetection Run Safe Search to
+ *     detect potentially unsafe or undesirable content. (Value:
+ *     "SAFE_SEARCH_DETECTION")
+ * @arg @c kGTLRVision_Feature_Type_TextDetection Run text detection /
+ *     optical character recognition (OCR). Text detection is optimized for
+ *     areas of text within a larger image; if the image is a document, use
+ *     `DOCUMENT_TEXT_DETECTION` instead. (Value: "TEXT_DETECTION")
+ * @arg @c kGTLRVision_Feature_Type_TypeUnspecified Unspecified feature type.
+ *     (Value: "TYPE_UNSPECIFIED")
+ * @arg @c kGTLRVision_Feature_Type_WebDetection Run web detection. (Value:
+ *     "WEB_DETECTION")
+ */
+@property(nonatomic, copy, nullable) NSString *type;
+
+@end
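+
+// Editor's note (not generated): a sketch of configuring a feature for a
+// request. GTLRObject's +object convenience constructor comes from the GTLR
+// runtime; the GTLRVision_AnnotateImageRequest wiring is an assumption based
+// on that class's declaration elsewhere in this service.
+//
+//   GTLRVision_Feature *feature = [GTLRVision_Feature object];
+//   feature.type = kGTLRVision_Feature_Type_LabelDetection;
+//   feature.maxResults = @10;  // ignored for TEXT_DETECTION etc., see above
+//
+//   GTLRVision_AnnotateImageRequest *request =
+//       [GTLRVision_AnnotateImageRequest object];
+//   request.features = @[ feature ];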
+
+
+/**
+ * The Google Cloud Storage location where the output will be written to.
+ */
+@interface GTLRVision_GcsDestination : GTLRObject
+
+/**
+ * Google Cloud Storage URI where the results will be stored. Results will
+ * be in JSON format, each preceded by its corresponding input URI. This field
+ * can either represent a single file, or a prefix for multiple outputs.
+ * Prefixes must end in a `/`.
+ * Examples:
+ * * File: gs://bucket-name/filename.json
+ * * Prefix: gs://bucket-name/prefix/here/
+ * * File: gs://bucket-name/prefix/here
+ * If there are multiple outputs, each response is still an
+ * AnnotateFileResponse, each of which contains some subset of the full list
+ * of AnnotateImageResponses. Multiple outputs can happen if, for example, the
+ * output JSON is too large and overflows into multiple sharded files.
+ */
+@property(nonatomic, copy, nullable) NSString *uri;
+
+@end
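+
+// Editor's note (not generated): a sketch showing the file-vs-prefix
+// distinction described above; the bucket and object names are placeholders.
+//
+//   GTLRVision_GcsDestination *destination = [GTLRVision_GcsDestination object];
+//   // A single output file:
+//   destination.uri = @"gs://my-bucket/results/output.json";
+//   // ...or a prefix for sharded output (note the trailing slash):
+//   destination.uri = @"gs://my-bucket/results/";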
+
+
+/**
+ * The Google Cloud Storage location where the input will be read from.
+ */
+@interface GTLRVision_GcsSource : GTLRObject
+
+/**
+ * Google Cloud Storage URI for the input file. This must only be a
+ * Google Cloud Storage object. Wildcards are not currently supported.
+ */
+@property(nonatomic, copy, nullable) NSString *uri;
+
+@end
+
+
+/**
+ * Response to a single file annotation request. A file may contain one or more
+ * images, which individually have their own responses.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1AnnotateFileResponse : GTLRObject
+
+/** Information about the file for which this response is generated. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1InputConfig *inputConfig;
+
+/** Individual responses to images found within the file. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1AnnotateImageResponse *> *responses;
+
+@end
+
+
+/**
+ * Response to an image annotation request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1AnnotateImageResponse : GTLRObject
+
+/**
+ * If present, contextual information is needed to understand where this image
+ * comes from.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1ImageAnnotationContext *context;
+
+/** If present, crop hints have completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1CropHintsAnnotation *cropHintsAnnotation;
+
+/**
+ * If set, represents the error message for the operation.
+ * Note that filled-in image annotations are guaranteed to be
+ * correct, even when `error` is set.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_Status *error;
+
+/** If present, face detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation *> *faceAnnotations;
+
+/**
+ * If present, text (OCR) detection or document (OCR) text detection has
+ * completed successfully.
+ * This annotation provides the structural hierarchy for the OCR detected
+ * text.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotation *fullTextAnnotation;
+
+/** If present, image properties were extracted successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1ImageProperties *imagePropertiesAnnotation;
+
+/** If present, label detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1EntityAnnotation *> *labelAnnotations;
+
+/** If present, landmark detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1EntityAnnotation *> *landmarkAnnotations;
+
+/**
+ * If present, localized object detection has completed successfully.
+ * This will be sorted descending by confidence score.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1LocalizedObjectAnnotation *> *localizedObjectAnnotations;
+
+/** If present, logo detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1EntityAnnotation *> *logoAnnotations;
+
+/** If present, product search has completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResults *productSearchResults;
+
+/** If present, safe-search annotation has completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation *safeSearchAnnotation;
+
+/** If present, text (OCR) detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1EntityAnnotation *> *textAnnotations;
+
+/** If present, web detection has completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1WebDetection *webDetection;
+
+@end
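+
+// Editor's note (not generated): a sketch of consuming one of these
+// responses. Per the `error` documentation above, annotations that are
+// filled in remain valid even when `error` is set, so check both.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1AnnotateImageResponse *resp = ...;
+//   if (resp.error) {
+//     NSLog(@"Partial failure: %@", resp.error);
+//   }
+//   for (GTLRVision_GoogleCloudVisionV1p1beta1EntityAnnotation *label in
+//        resp.labelAnnotations) {
+//     NSLog(@"%@ (score %.2f)", label.descriptionProperty, label.score.floatValue);
+//   }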
+
+
+/**
+ * The response for a single offline file annotation request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1AsyncAnnotateFileResponse : GTLRObject
+
+/** The output location and metadata from AsyncAnnotateFileRequest. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1OutputConfig *outputConfig;
+
+@end
+
+
+/**
+ * Response to an async batch file annotation request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1AsyncBatchAnnotateFilesResponse : GTLRObject
+
+/**
+ * The list of file annotation responses, one for each request in
+ * AsyncBatchAnnotateFilesRequest.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1AsyncAnnotateFileResponse *> *responses;
+
+@end
+
+
+/**
+ * Logical element on the page.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1Block : GTLRObject
+
+/**
+ * Detected block type (text, image, etc.) for this block.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Barcode
+ *     Barcode block. (Value: "BARCODE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Picture
+ *     Image block. (Value: "PICTURE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Ruler
+ *     Horizontal/vertical line box. (Value: "RULER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Table
+ *     Table block. (Value: "TABLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Text
+ *     Regular text block. (Value: "TEXT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1Block_BlockType_Unknown
+ *     Unknown block type. (Value: "UNKNOWN")
+ */
+@property(nonatomic, copy, nullable) NSString *blockType;
+
+/**
+ * The bounding box for the block.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ *   * when the text is horizontal it might look like:
+ *        0----1
+ *        |    |
+ *        3----2
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *        2----3
+ *        |    |
+ *        1----0
+ *   and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results on the block. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** List of paragraphs in this block (if this block is of type text). */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1Paragraph *> *paragraphs;
+
+/** Additional information detected for the block. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationTextProperty *property;
+
+@end
+
+
+/**
+ * A bounding polygon for the detected image annotation.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly : GTLRObject
+
+/** The bounding polygon normalized vertices. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1NormalizedVertex *> *normalizedVertices;
+
+/** The bounding polygon vertices. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1Vertex *> *vertices;
+
+@end
+
+
+/**
+ * Color information consists of RGB channels, score, and the fraction of
+ * the image that the color occupies.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1ColorInfo : GTLRObject
+
+/** RGB components of the color. */
+@property(nonatomic, strong, nullable) GTLRVision_Color *color;
+
+/**
+ * The fraction of pixels the color occupies in the image.
+ * Value in range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *pixelFraction;
+
+/**
+ * Image-specific score for this color. Value in range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
+
+
+/**
+ * Single crop hint that is used to generate a new crop when serving an image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1CropHint : GTLRObject
+
+/**
+ * The bounding polygon for the crop region. The coordinates of the bounding
+ * box are in the original image's scale.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *boundingPoly;
+
+/**
+ * Confidence of this being a salient region. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * Fraction of importance of this salient region with respect to the original
+ * image.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *importanceFraction;
+
+@end
+
+
+/**
+ * Set of crop hints that are used to generate new crops when serving images.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1CropHintsAnnotation : GTLRObject
+
+/** Crop hint results. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1CropHint *> *cropHints;
+
+@end
+
+
+/**
+ * Set of dominant colors and their corresponding scores.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1DominantColorsAnnotation : GTLRObject
+
+/** RGB color values with their score and pixel fraction. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1ColorInfo *> *colors;
+
+@end
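+
+// Editor's note (not generated): a sketch of walking a dominant-colors
+// annotation. Each `colors` entry carries both a relevance `score` and the
+// `pixelFraction` of the image it covers, as documented in ColorInfo above.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1DominantColorsAnnotation *dominant = ...;
+//   for (GTLRVision_GoogleCloudVisionV1p1beta1ColorInfo *info in dominant.colors) {
+//     NSLog(@"score %.2f, covers %.1f%% of the image",
+//           info.score.floatValue, info.pixelFraction.floatValue * 100.0f);
+//   }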
+
+
+/**
+ * Set of detected entity features.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1EntityAnnotation : GTLRObject
+
+/**
+ * Image region to which this entity belongs. Not produced
+ * for `LABEL_DETECTION` features.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *boundingPoly;
+
+/**
+ * **Deprecated. Use `score` instead.**
+ * The accuracy of the entity detection in an image.
+ * For example, for an image in which the "Eiffel Tower" entity is detected,
+ * this field represents the confidence that there is a tower in the query
+ * image. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * Entity textual description, expressed in its `locale` language.
+ *
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
+ */
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
+
+/**
+ * The language code for the locale in which the entity textual
+ * `description` is expressed.
+ */
+@property(nonatomic, copy, nullable) NSString *locale;
+
+/**
+ * The location information for the detected entity. Multiple
+ * `LocationInfo` elements can be present because one location may
+ * indicate the location of the scene in the image, and another location
+ * may indicate the location of the place where the image was taken.
+ * Location information is usually present for landmarks.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1LocationInfo *> *locations;
+
+/**
+ * Opaque entity ID. Some IDs may be available in
+ * [Google Knowledge Graph Search
+ * API](https://developers.google.com/knowledge-graph/).
+ */
+@property(nonatomic, copy, nullable) NSString *mid;
+
+/**
+ * Some entities may have optional user-supplied `Property` (name/value)
+ * fields, such as a score or string that qualifies the entity.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1Property *> *properties;
+
+/**
+ * Overall score of the result. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+/**
+ * The relevancy of the ICA (Image Content Annotation) label to the
+ * image. For example, the relevancy of "tower" is likely higher to an image
+ * containing the detected "Eiffel Tower" than to an image containing a
+ * detected distant towering building, even though the confidence that
+ * there is a tower in each image may be the same. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *topicality;
+
+@end
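+
+// Editor's note (not generated): `score`, the deprecated `confidence`, and
+// `topicality` are easy to conflate. A sketch of the distinction using a
+// hypothetical label annotation:
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1EntityAnnotation *label = ...;
+//   float score = label.score.floatValue;            // overall result quality
+//   float topicality = label.topicality.floatValue;  // relevance of the label
+//   // Prefer `score` over the deprecated `confidence` property.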
+
+
+/**
+ * A face annotation object contains the results of face detection.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation : GTLRObject
+
+/**
+ * Anger likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_Likely
+ *     Anger is likely. (Value: "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_Possible
+ *     Anger is possible. (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_Unknown
+ *     Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_Unlikely
+ *     Anger is unlikely. (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_VeryLikely
+ *     Anger is very likely. (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_AngerLikelihood_VeryUnlikely
+ *     Anger is very unlikely. (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *angerLikelihood;
+
+/**
+ * Blurred likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_Likely
+ *     The face is likely blurred. (Value: "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_Possible
+ *     The face is possibly blurred. (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_Unknown
+ *     Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_Unlikely
+ *     The face is unlikely to be blurred. (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_VeryLikely
+ *     The face is very likely blurred. (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_BlurredLikelihood_VeryUnlikely
+ *     The face is very unlikely to be blurred. (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *blurredLikelihood;
+
+/**
+ * The bounding polygon around the face. The coordinates of the bounding box
+ * are in the original image's scale.
+ * The bounding box is computed to "frame" the face in accordance with human
+ * expectations. It is based on the landmarker results.
+ * Note that one or more x and/or y coordinates may not be generated in the
+ * `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ * appears in the image to be annotated.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *boundingPoly;
+
+/**
+ * Detection confidence. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *detectionConfidence;
+
+/**
+ * The `fd_bounding_poly` bounding polygon is tighter than the
+ * `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ * is used to eliminate the face from any image analysis that detects the
+ * "amount of skin" visible in an image. It is not based on the
+ * landmarker results, only on the initial face detection, hence
+ * the <code>fd</code> (face detection) prefix.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *fdBoundingPoly;
+
+/**
+ * Headwear likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_Likely
+ *     Headwear is likely present. (Value: "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_Possible
+ *     Headwear is possibly present. (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_Unknown
+ *     Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_Unlikely
+ *     Headwear is unlikely to be present. (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_VeryLikely
+ *     Headwear is very likely present. (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_HeadwearLikelihood_VeryUnlikely
+ *     Headwear is very unlikely to be present. (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *headwearLikelihood;
+
+/**
+ * Joy likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_Likely
+ *     Joy is likely. (Value: "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_Possible
+ *     Joy is possible. (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_Unknown
+ *     Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_Unlikely
+ *     Joy is unlikely. (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_VeryLikely
+ *     Joy is very likely. (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_JoyLikelihood_VeryUnlikely
+ *     Joy is very unlikely. (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *joyLikelihood;
+
+/**
+ * Face landmarking confidence. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *landmarkingConfidence;
+
+/** Detected face landmarks. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark *> *landmarks;
+
+/**
+ * Yaw angle, which indicates the leftward/rightward angle that the face is
+ * pointing relative to the vertical plane perpendicular to the image. Range
+ * [-180,180].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *panAngle;
+
+/**
+ * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ * of the face relative to the image vertical about the axis perpendicular to
+ * the face. Range [-180,180].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *rollAngle;
+
+/**
+ * Sorrow likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_Likely
+ *     Sorrow is likely. (Value: "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_Possible
+ *     Sorrow is possible. (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_Unknown
+ *     Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_Unlikely
+ *     Sorrow is unlikely. (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_VeryLikely
+ *     Sorrow is very likely. (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SorrowLikelihood_VeryUnlikely
+ *     Sorrow is very unlikely. (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *sorrowLikelihood;
+
+/**
+ * Surprise likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_Likely
+ *     Surprise is likely. (Value: "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_Possible
+ *     Surprise is possible. (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_Unknown
+ *     Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_Unlikely
+ *     Surprise is unlikely. (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_VeryLikely
+ *     Surprise is very likely. (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_SurpriseLikelihood_VeryUnlikely
+ *     Surprise is very unlikely. (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *surpriseLikelihood;
+
+/**
+ * Pitch angle, which indicates the upwards/downwards angle that the face is
+ * pointing relative to the image's horizontal plane. Range [-180,180].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *tiltAngle;
+
+/**
+ * Under-exposed likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_Likely
+ *     The face is likely under-exposed. (Value: "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_Possible
+ *     The face is possibly under-exposed. (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_Unknown
+ *     Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_Unlikely
+ *     The face is unlikely to be under-exposed. (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_VeryLikely
+ *     The face is very likely under-exposed. (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation_UnderExposedLikelihood_VeryUnlikely
+ *     The face is very unlikely to be under-exposed. (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *underExposedLikelihood;
+
+@end
+
+
+/**
+ * A face-specific landmark (for example, a face feature).
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark : GTLRObject
+
+/** Face landmark position. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1Position *position;
+
+/**
+ * Face landmark type.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_ChinGnathion
+ *     Chin gnathion. (Value: "CHIN_GNATHION")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_ChinLeftGonion
+ *     Chin left gonion. (Value: "CHIN_LEFT_GONION")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_ChinRightGonion
+ *     Chin right gonion. (Value: "CHIN_RIGHT_GONION")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_ForeheadGlabella
+ *     Forehead glabella. (Value: "FOREHEAD_GLABELLA")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEarTragion
+ *     Left ear tragion. (Value: "LEFT_EAR_TRAGION")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEye
+ *     Left eye. (Value: "LEFT_EYE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyeBottomBoundary
+ *     Left eye, bottom boundary. (Value: "LEFT_EYE_BOTTOM_BOUNDARY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyebrowUpperMidpoint
+ *     Left eyebrow, upper midpoint. (Value: "LEFT_EYEBROW_UPPER_MIDPOINT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyeLeftCorner
+ *     Left eye, left corner. (Value: "LEFT_EYE_LEFT_CORNER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyePupil
+ *     Left eye pupil. (Value: "LEFT_EYE_PUPIL")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyeRightCorner
+ *     Left eye, right corner. (Value: "LEFT_EYE_RIGHT_CORNER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyeTopBoundary
+ *     Left eye, top boundary. (Value: "LEFT_EYE_TOP_BOUNDARY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftOfLeftEyebrow
+ *     Left of left eyebrow. (Value: "LEFT_OF_LEFT_EYEBROW")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftOfRightEyebrow
+ *     Left of right eyebrow. (Value: "LEFT_OF_RIGHT_EYEBROW")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LowerLip
+ *     Lower lip. (Value: "LOWER_LIP")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_MidpointBetweenEyes
+ *     Midpoint between eyes. (Value: "MIDPOINT_BETWEEN_EYES")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_MouthCenter
+ *     Mouth center. (Value: "MOUTH_CENTER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_MouthLeft
+ *     Mouth left. (Value: "MOUTH_LEFT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_MouthRight
+ *     Mouth right. (Value: "MOUTH_RIGHT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_NoseBottomCenter
+ *     Nose, bottom center. (Value: "NOSE_BOTTOM_CENTER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_NoseBottomLeft
+ *     Nose, bottom left. (Value: "NOSE_BOTTOM_LEFT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_NoseBottomRight
+ *     Nose, bottom right. (Value: "NOSE_BOTTOM_RIGHT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_NoseTip
+ *     Nose tip. (Value: "NOSE_TIP")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEarTragion
+ *     Right ear tragion. (Value: "RIGHT_EAR_TRAGION")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEye
+ *     Right eye. (Value: "RIGHT_EYE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyeBottomBoundary
+ *     Right eye, bottom boundary. (Value: "RIGHT_EYE_BOTTOM_BOUNDARY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyebrowUpperMidpoint
+ *     Right eyebrow, upper midpoint. (Value: "RIGHT_EYEBROW_UPPER_MIDPOINT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyeLeftCorner
+ *     Right eye, left corner. (Value: "RIGHT_EYE_LEFT_CORNER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyePupil
+ *     Right eye pupil. (Value: "RIGHT_EYE_PUPIL")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyeRightCorner
+ *     Right eye, right corner. (Value: "RIGHT_EYE_RIGHT_CORNER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightEyeTopBoundary
+ *     Right eye, top boundary. (Value: "RIGHT_EYE_TOP_BOUNDARY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightOfLeftEyebrow
+ *     Right of left eyebrow. (Value: "RIGHT_OF_LEFT_EYEBROW")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_RightOfRightEyebrow
+ *     Right of right eyebrow. (Value: "RIGHT_OF_RIGHT_EYEBROW")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_UnknownLandmark
+ *     Unknown face landmark detected. Should not be filled. (Value:
+ *     "UNKNOWN_LANDMARK")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_UpperLip
+ *     Upper lip. (Value: "UPPER_LIP")
+ */
+@property(nonatomic, copy, nullable) NSString *type;
+
+@end
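+
+// Editor's note (not generated): a sketch of locating one landmark by type.
+// The `face` variable is a hypothetical
+// GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotation.
+//
+//   for (GTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark *mark in
+//        face.landmarks) {
+//     if ([mark.type isEqual:kGTLRVision_GoogleCloudVisionV1p1beta1FaceAnnotationLandmark_Type_LeftEyePupil]) {
+//       // Positions are in the original image's scale; z is depth.
+//       NSLog(@"Left pupil at (%.0f, %.0f)",
+//             mark.position.x.floatValue, mark.position.y.floatValue);
+//     }
+//   }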
+
+
+/**
+ * The Google Cloud Storage location where the output will be written to.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1GcsDestination : GTLRObject
+
+/**
+ * Google Cloud Storage URI where the results will be stored. Results will
+ * be in JSON format, each preceded by its corresponding input URI. This field
+ * can either represent a single file, or a prefix for multiple outputs.
+ * Prefixes must end in a `/`.
+ * Examples:
+ * * File: gs://bucket-name/filename.json
+ * * Prefix: gs://bucket-name/prefix/here/
+ * * File: gs://bucket-name/prefix/here
+ * If there are multiple outputs, each response is still an
+ * AnnotateFileResponse, each of which contains some subset of the full list
+ * of AnnotateImageResponses. Multiple outputs can happen if, for example, the
+ * output JSON is too large and overflows into multiple sharded files.
+ */
+@property(nonatomic, copy, nullable) NSString *uri;
+
+@end
+
+
+/**
+ * The Google Cloud Storage location where the input will be read from.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1GcsSource : GTLRObject
+
+/**
+ * Google Cloud Storage URI for the input file. This must only be a
+ * Google Cloud Storage object. Wildcards are not currently supported.
+ */
+@property(nonatomic, copy, nullable) NSString *uri;
+
+@end
+
+
+/**
+ * If an image was produced from a file (e.g. a PDF), this message gives
+ * information about the source of that image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1ImageAnnotationContext : GTLRObject
+
+/**
+ * If the file was a PDF or TIFF, this field gives the page number within
+ * the file used to produce the image.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *pageNumber;
+
+/** The URI of the file used to produce the image. */
+@property(nonatomic, copy, nullable) NSString *uri;
+
+@end
+
+
+/**
+ * Stores image properties, such as dominant colors.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1ImageProperties : GTLRObject
+
+/** If present, dominant colors completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1DominantColorsAnnotation *dominantColors;
+
+@end
+
+
+/**
+ * The desired input location and metadata.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1InputConfig : GTLRObject
+
+/** The Google Cloud Storage location to read the input from. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1GcsSource *gcsSource;
+
+/**
+ * The type of the file. Currently only "application/pdf" and "image/tiff"
+ * are supported. Wildcards are not supported.
+ */
+@property(nonatomic, copy, nullable) NSString *mimeType;
+
+@end
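+
+// Editor's note (not generated): a sketch of describing a PDF input, using
+// only the two fields above. The bucket path is a placeholder.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1GcsSource *source =
+//       [GTLRVision_GoogleCloudVisionV1p1beta1GcsSource object];
+//   source.uri = @"gs://my-bucket/documents/invoice.pdf";
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1InputConfig *input =
+//       [GTLRVision_GoogleCloudVisionV1p1beta1InputConfig object];
+//   input.gcsSource = source;
+//   input.mimeType = @"application/pdf";  // or "image/tiff"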
+
+
+/**
+ * Set of detected objects with bounding boxes.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1LocalizedObjectAnnotation : GTLRObject
+
+/** Image region to which this object belongs. This must be populated. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *boundingPoly;
+
+/**
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ * information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ */
+@property(nonatomic, copy, nullable) NSString *languageCode;
+
+/** Object ID that should align with EntityAnnotation mid. */
+@property(nonatomic, copy, nullable) NSString *mid;
+
+/** Object name, expressed in its `language_code` language. */
+@property(nonatomic, copy, nullable) NSString *name;
+
+/**
+ * Score of the result. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
+
+
+/**
+ * Detected entity location information.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1LocationInfo : GTLRObject
+
+/** lat/long location coordinates. */
+@property(nonatomic, strong, nullable) GTLRVision_LatLng *latLng;
+
+@end
+
+
+/**
+ * A vertex represents a 2D point in the image.
+ * NOTE: the normalized vertex coordinates are relative to the original image
+ * and range from 0 to 1.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1NormalizedVertex : GTLRObject
+
+/**
+ * X coordinate.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *x;
+
+/**
+ * Y coordinate.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *y;
+
+@end
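+
+// Editor's note (not generated): normalized vertices must be multiplied by
+// the image dimensions to recover pixel coordinates. A sketch with
+// hypothetical width/height values; CGPointMake assumes CoreGraphics is
+// available.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1NormalizedVertex *vertex = ...;
+//   CGFloat imageWidth = 640.0, imageHeight = 480.0;
+//   CGPoint pixel = CGPointMake(vertex.x.floatValue * imageWidth,
+//                               vertex.y.floatValue * imageHeight);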
+
+
+/**
+ * Contains metadata for the BatchAnnotateImages operation.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata : GTLRObject
+
+/** The time when the batch request was received. */
+@property(nonatomic, strong, nullable) GTLRDateTime *createTime;
+
+/**
+ * Current state of the batch operation.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_Cancelled
+ *     The batch processing was cancelled. (Value: "CANCELLED")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_Created
+ *     Request is received. (Value: "CREATED")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_Done
+ *     The batch processing is done. (Value: "DONE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_Running
+ *     Request is actively being processed. (Value: "RUNNING")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1OperationMetadata_State_StateUnspecified
+ *     Invalid. (Value: "STATE_UNSPECIFIED")
+ */
+@property(nonatomic, copy, nullable) NSString *state;
+
+/** The time when the operation result was last updated. */
+@property(nonatomic, strong, nullable) GTLRDateTime *updateTime;
+
+@end
+
+
+/**
+ * The desired output location and metadata.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1OutputConfig : GTLRObject
+
+/**
+ * The max number of response protos to put into each output JSON file on
+ * Google Cloud Storage.
+ * The valid range is [1, 100]. If not specified, the default value is 20.
+ * For example, for one PDF file with 100 pages, 100 response protos will
+ * be generated. If `batch_size` = 20, then 5 JSON files each
+ * containing 20 response protos will be written under the prefix
+ * `gcs_destination`.`uri`.
+ * Currently, batch_size only applies to GcsDestination, with potential future
+ * support for other output configurations.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *batchSize;
+
+/** The Google Cloud Storage location to write the output(s) to. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1GcsDestination *gcsDestination;
+
+@end
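+
+// Editor's note (not generated): a sketch of the batch-size arithmetic
+// described above. With 100 response protos and batchSize = 20, the service
+// writes ceil(100 / 20) = 5 output files under the destination prefix.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1OutputConfig *output =
+//       [GTLRVision_GoogleCloudVisionV1p1beta1OutputConfig object];
+//   output.batchSize = @20;  // valid range [1, 100]; default 20
+//   output.gcsDestination = destination;  // a ...GcsDestination set elsewhere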
+
+
+/**
+ * Detected page from OCR.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1Page : GTLRObject
+
+/** List of blocks of text, images, etc. on this page. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1Block *> *blocks;
+
+/**
+ * Confidence of the OCR results on the page. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * Page height. For PDFs the unit is points. For images (including
+ * TIFFs) the unit is pixels.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *height;
+
+/** Additional information detected on the page. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationTextProperty *property;
+
+/**
+ * Page width. For PDFs the unit is points. For images (including
+ * TIFFs) the unit is pixels.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *width;
+
+@end
+
+
+/**
+ * Structural unit of text representing a number of words in a certain order.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1Paragraph : GTLRObject
+
+/**
+ * The bounding box for the paragraph.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ *   * when the text is horizontal it might look like:
+ *        0----1
+ *        |    |
+ *        3----2
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *        2----3
+ *        |    |
+ *        1----0
+ *   and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results for the paragraph. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** Additional information detected for the paragraph. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationTextProperty *property;
+
+/** List of words in this paragraph. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1Word *> *words;
+
+@end
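+
+// Editor's note (not generated): pages contain blocks, blocks contain
+// paragraphs, and paragraphs contain words, so walking the OCR hierarchy is
+// nested loops. A sketch over a hypothetical `page`:
+//
+//   for (GTLRVision_GoogleCloudVisionV1p1beta1Block *block in page.blocks) {
+//     for (GTLRVision_GoogleCloudVisionV1p1beta1Paragraph *paragraph in
+//          block.paragraphs) {
+//       NSLog(@"Paragraph with %lu words (confidence %.2f)",
+//             (unsigned long)paragraph.words.count,
+//             paragraph.confidence.floatValue);
+//     }
+//   }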
+
+
+/**
+ * A 3D position in the image, used primarily for Face detection landmarks.
+ * A valid Position must have both x and y coordinates.
+ * The position coordinates are in the same scale as the original image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1Position : GTLRObject
+
+/**
+ * X coordinate.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *x;
+
+/**
+ * Y coordinate.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *y;
+
+/**
+ * Z coordinate (or depth).
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *z;
+
+@end
+
+
+/**
+ * A Product contains ReferenceImages.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1Product : GTLRObject
+
+/**
+ * User-provided metadata to be stored with this product. Must be at most 4096
+ * characters long.
+ *
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
+ */
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
+
+/**
+ * The user-provided name for this Product. Must not be empty. Must be at most
+ * 4096 characters long.
+ */
+@property(nonatomic, copy, nullable) NSString *displayName;
+
+/**
+ * The resource name of the product.
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ * This field is ignored when creating a product.
+ */
+@property(nonatomic, copy, nullable) NSString *name;
+
+/**
+ * The category for the product identified by the reference image. This should
+ * be either "homegoods", "apparel", or "toys".
+ * This field is immutable.
+ */
+@property(nonatomic, copy, nullable) NSString *productCategory;
+
+/**
+ * Key-value pairs that can be attached to a product. At query time,
+ * constraints can be specified based on the product_labels.
+ * Note that integer values can be provided as strings, e.g. "1199". Only
+ * strings with integer values can match a range-based restriction which is
+ * to be supported soon.
+ * Multiple values can be assigned to the same key. One product may have up to
+ * 100 product_labels.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1ProductKeyValue *> *productLabels;
+
+@end
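+
+// Example (not part of the generated API surface): a minimal sketch of
+// populating a Product within the constraints documented above. `object` is
+// the GTLRObject convenience constructor; the values shown are hypothetical.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1ProductKeyValue *label =
+//       [GTLRVision_GoogleCloudVisionV1p1beta1ProductKeyValue object];
+//   label.key = @"style";      // non-empty, at most 128 bytes
+//   label.value = @"vintage";  // non-empty, at most 128 bytes
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1Product *product =
+//       [GTLRVision_GoogleCloudVisionV1p1beta1Product object];
+//   product.displayName = @"Ceramic mug";   // required, at most 4096 characters
+//   product.productCategory = @"homegoods"; // immutable once set
+//   product.productLabels = @[ label ];     // up to 100 labels per product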
+
+
+/**
+ * A product label represented as a key-value pair.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1ProductKeyValue : GTLRObject
+
+/**
+ * The key of the label attached to the product. Cannot be empty and cannot
+ * exceed 128 bytes.
+ */
+@property(nonatomic, copy, nullable) NSString *key;
+
+/**
+ * The value of the label attached to the product. Cannot be empty and
+ * cannot exceed 128 bytes.
+ */
+@property(nonatomic, copy, nullable) NSString *value;
+
+@end
+
+
+/**
+ * Results for a product search request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResults : GTLRObject
+
+/**
+ * Timestamp of the index which provided these results. Changes made after
+ * this time are not reflected in the current results.
+ */
+@property(nonatomic, strong, nullable) GTLRDateTime *indexTime;
+
+/**
+ * List of results grouped by products detected in the query image. Each entry
+ * corresponds to one bounding polygon in the query image, and contains the
+ * matching products specific to that region. There may be duplicate product
+ * matches in the union of all the per-product results.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResultsGroupedResult *> *productGroupedResults;
+
+/** List of results, one for each product match. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResultsResult *> *results;
+
+@end
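+
+// Example (not part of the generated API surface): a minimal sketch of walking
+// the grouped results described above. Assumes a populated `searchResults`;
+// the variable names are hypothetical.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResults *searchResults = ...;
+//   for (GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResultsGroupedResult
+//            *group in searchResults.productGroupedResults) {
+//     // One bounding polygon in the query image per group; note the same
+//     // product may appear in more than one group.
+//     for (GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResultsResult
+//              *result in group.results) {
+//       NSLog(@"%@ (score %.2f)", result.product.displayName,
+//             result.score.floatValue);
+//     }
+//   }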
+
+
+/**
+ * Information about the products similar to a single product in a query
+ * image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResultsGroupedResult : GTLRObject
+
+/** The bounding polygon around the product detected in the query image. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *boundingPoly;
+
+/** List of results, one for each product match. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResultsResult *> *results;
+
+@end
+
+
+/**
+ * Information about a product.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1ProductSearchResultsResult : GTLRObject
+
+/**
+ * The resource name of the image from the product that is the closest match
+ * to the query.
+ */
+@property(nonatomic, copy, nullable) NSString *image;
+
+/** The Product. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1Product *product;
+
+/**
+ * A confidence level on the match, ranging from 0 (no confidence) to
+ * 1 (full confidence).
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
+
+
+/**
+ * A `Property` consists of a user-supplied name/value pair.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1Property : GTLRObject
+
+/** Name of the property. */
+@property(nonatomic, copy, nullable) NSString *name;
+
+/**
+ * Value of numeric properties.
+ *
+ * Uses NSNumber of unsignedLongLongValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *uint64Value;
+
+/** Value of the property. */
+@property(nonatomic, copy, nullable) NSString *value;
+
+@end
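+
+// Example (not part of the generated API surface): `uint64Value` is surfaced
+// as an NSNumber wrapping an unsigned long long, so reads and writes should go
+// through `unsignedLongLongValue`. A minimal sketch with hypothetical values:
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1Property *prop =
+//       [GTLRVision_GoogleCloudVisionV1p1beta1Property object];
+//   prop.name = @"pixelCount";
+//   prop.uint64Value = @(18446744073709551615ULL);  // full uint64 range fits
+//   unsigned long long raw = prop.uint64Value.unsignedLongLongValue;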
+
+
+/**
+ * Set of features pertaining to the image, computed by computer vision
+ * methods over safe-search verticals (for example, adult, spoof, medical,
+ * violence).
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation : GTLRObject
+
+/**
+ * Represents the adult content likelihood for the image. Adult content may
+ * contain elements such as nudity, pornographic images or cartoons, or
+ * sexual activities.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *adult;
+
+/**
+ * Likelihood that this is a medical image.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Medical_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *medical;
+
+/**
+ * Likelihood that the request image contains racy content. Racy content may
+ * include (but is not limited to) skimpy or sheer clothing, strategically
+ * covered nudity, lewd or provocative poses, or close-ups of sensitive
+ * body areas.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Racy_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *racy;
+
+/**
+ * Spoof likelihood. The likelihood that a modification
+ * was made to the image's canonical version to make it appear
+ * funny or offensive.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Spoof_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *spoof;
+
+/**
+ * Likelihood that this image contains violent content.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Violence_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *violence;
+
+@end
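+
+// Example (not part of the generated API surface): a minimal sketch of gating
+// on the likelihood strings documented above, using the string constants this
+// header declares for them. The blocking threshold is a hypothetical choice.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation *safeSearch = ...;
+//   NSSet<NSString *> *blocked = [NSSet setWithObjects:
+//       kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_Likely,
+//       kGTLRVision_GoogleCloudVisionV1p1beta1SafeSearchAnnotation_Adult_VeryLikely,
+//       nil];
+//   if (safeSearch.adult && [blocked containsObject:safeSearch.adult]) {
+//     // Reject or blur the image.
+//   }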
+
+
+/**
+ * A single symbol representation.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1Symbol : GTLRObject
+
+/**
+ * The bounding box for the symbol.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ *   * when the text is horizontal it might look like:
+ *      0----1
+ *      |    |
+ *      3----2
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *      2----3
+ *      |    |
+ *      1----0
+ *   and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results for the symbol. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** Additional information detected for the symbol. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationTextProperty *property;
+
+/** The actual UTF-8 representation of the symbol. */
+@property(nonatomic, copy, nullable) NSString *text;
+
+@end
+
+
+/**
+ * TextAnnotation contains a structured representation of OCR extracted text.
+ * The hierarchy of an OCR extracted text structure is as follows:
+ *     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+ * Each structural component, starting from Page, may further have its own
+ * properties. Properties describe detected languages, breaks, etc. Please
+ * refer to the TextAnnotation.TextProperty message definition below for more
+ * detail.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotation : GTLRObject
+
+/** List of pages detected by OCR. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1Page *> *pages;
+
+/** UTF-8 text detected on the pages. */
+@property(nonatomic, copy, nullable) NSString *text;
+
+@end
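+
+// Example (not part of the generated API surface): a minimal sketch of walking
+// the TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol hierarchy
+// described above to rebuild the detected text. Assumes a populated
+// `fullTextAnnotation` and that Page exposes its blocks as `blocks`.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotation *fullTextAnnotation = ...;
+//   NSMutableString *rebuilt = [NSMutableString string];
+//   for (GTLRVision_GoogleCloudVisionV1p1beta1Page *page in fullTextAnnotation.pages) {
+//     for (GTLRVision_GoogleCloudVisionV1p1beta1Block *block in page.blocks) {
+//       for (GTLRVision_GoogleCloudVisionV1p1beta1Paragraph *para in block.paragraphs) {
+//         for (GTLRVision_GoogleCloudVisionV1p1beta1Word *word in para.words) {
+//           for (GTLRVision_GoogleCloudVisionV1p1beta1Symbol *symbol in word.symbols) {
+//             [rebuilt appendString:(symbol.text ?: @"")];
+//           }
+//           // Naive separator; the symbol's property.detectedBreak carries
+//           // the real break type (see TextAnnotationDetectedBreak below).
+//           [rebuilt appendString:@" "];
+//         }
+//       }
+//     }
+//   }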
+
+
+/**
+ * Detected start or end of a structural component.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak : GTLRObject
+
+/**
+ * True if the break precedes the element.
+ *
+ * Uses NSNumber of boolValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *isPrefix;
+
+/**
+ * Detected break type.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_EolSureSpace
+ * Line-wrapping break. (Value: "EOL_SURE_SPACE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_Hyphen
+ * End-line hyphen that is not present in text; does not co-occur with
+ * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`. (Value: "HYPHEN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_LineBreak
+ * Line break that ends a paragraph. (Value: "LINE_BREAK")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_Space
+ * Regular space. (Value: "SPACE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_SureSpace
+ * Sure space (very wide). (Value: "SURE_SPACE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak_Type_Unknown
+ * Unknown break label type. (Value: "UNKNOWN")
+ */
+@property(nonatomic, copy, nullable) NSString *type;
+
+@end
+
+
+/**
+ * Detected language for a structural component.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedLanguage : GTLRObject
+
+/**
+ * Confidence of detected language. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ * information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ */
+@property(nonatomic, copy, nullable) NSString *languageCode;
+
+@end
+
+
+/**
+ * Additional information detected on the structural component.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationTextProperty : GTLRObject
+
+/** Detected start or end of a text segment. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedBreak *detectedBreak;
+
+/** A list of detected languages together with confidence. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationDetectedLanguage *> *detectedLanguages;
+
+@end
+
+
+/**
+ * A vertex represents a 2D point in the image.
+ * NOTE: the vertex coordinates are in the same scale as the original image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1Vertex : GTLRObject
+
+/**
+ * X coordinate.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *x;
+
+/**
+ * Y coordinate.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *y;
+
+@end
+
+
+/**
+ * Relevant information for the image from the Internet.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1WebDetection : GTLRObject
+
+/**
+ * The service's best guess as to the topic of the request image.
+ * Inferred from similar images on the open web.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebLabel *> *bestGuessLabels;
+
+/**
+ * Fully matching images from the Internet.
+ * Can include resized copies of the query image.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebImage *> *fullMatchingImages;
+
+/** Web pages containing the matching images from the Internet. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebPage *> *pagesWithMatchingImages;
+
+/**
+ * Partial matching images from the Internet.
+ * Those images are similar enough to share some key-point features. For
+ * example, an original image will likely have partial matching for its crops.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebImage *> *partialMatchingImages;
+
+/** The visually similar image results. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebImage *> *visuallySimilarImages;
+
+/** Deduced entities from similar images on the Internet. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebEntity *> *webEntities;
+
+@end
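+
+// Example (not part of the generated API surface): a minimal sketch of reading
+// the web detection fields above. Assumes a populated `webDetection`; the
+// variable names are hypothetical.
+//
+//   GTLRVision_GoogleCloudVisionV1p1beta1WebDetection *webDetection = ...;
+//   GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebLabel *bestGuess =
+//       webDetection.bestGuessLabels.firstObject;
+//   NSLog(@"best guess: %@", bestGuess.label);
+//   for (GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebEntity *entity
+//        in webDetection.webEntities) {
+//     // Entity scores are not normalized and are only comparable within
+//     // this query.
+//     NSLog(@"%@ (%.2f)", entity.descriptionProperty, entity.score.floatValue);
+//   }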
+
+
+/**
+ * Entity deduced from similar images on the Internet.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebEntity : GTLRObject
+
+/**
+ * Canonical description of the entity, in English.
+ *
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
+ */
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
+
+/** Opaque entity ID. */
+@property(nonatomic, copy, nullable) NSString *entityId;
+
+/**
+ * Overall relevancy score for the entity.
+ * Not normalized and not comparable across different image queries.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
+
+
+/**
+ * Metadata for online images.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebImage : GTLRObject
+
+/**
+ * (Deprecated) Overall relevancy score for the image.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+/** The result image URL. */
+@property(nonatomic, copy, nullable) NSString *url;
+
+@end
+
+
+/**
+ * Label to provide extra metadata for the web detection.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebLabel : GTLRObject
+
+/** Label for extra metadata. */
+@property(nonatomic, copy, nullable) NSString *label;
+
+/**
+ * The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ * For more information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ */
+@property(nonatomic, copy, nullable) NSString *languageCode;
+
+@end
+
+
+/**
+ * Metadata for web pages.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebPage : GTLRObject
+
+/**
+ * Fully matching images on the page.
+ * Can include resized copies of the query image.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebImage *> *fullMatchingImages;
+
+/** Title for the web page; may contain HTML markup. */
+@property(nonatomic, copy, nullable) NSString *pageTitle;
+
+/**
+ * Partial matching images on the page.
+ * Those images are similar enough to share some key-point features. For
+ * example, an original image will likely have partial matching for its
+ * crops.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1WebDetectionWebImage *> *partialMatchingImages;
+
+/**
+ * (Deprecated) Overall relevancy score for the web page.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+/** The result web page URL. */
+@property(nonatomic, copy, nullable) NSString *url;
+
+@end
+
+
+/**
+ * A word representation.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p1beta1Word : GTLRObject
+
+/**
+ * The bounding box for the word.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ *   * when the text is horizontal it might look like:
+ *      0----1
+ *      |    |
+ *      3----2
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *      2----3
+ *      |    |
+ *      1----0
+ *   and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results for the word. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** Additional information detected for the word. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p1beta1TextAnnotationTextProperty *property;
+
+/**
+ * List of symbols in the word.
+ * The order of the symbols follows the natural reading order.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p1beta1Symbol *> *symbols;
+
+@end
+
+
+/**
+ * Response to a single file annotation request. A file may contain one or more
+ * images, which individually have their own responses.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1AnnotateFileResponse : GTLRObject
+
+/** Information about the file for which this response is generated. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1InputConfig *inputConfig;
+
+/** Individual responses to images found within the file. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1AnnotateImageResponse *> *responses;
+
+@end
+
+
+/**
+ * Response to an image annotation request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1AnnotateImageResponse : GTLRObject
+
+/**
+ * If present, contextual information is needed to understand where this image
+ * comes from.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1ImageAnnotationContext *context;
+
+/** If present, crop hints have completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1CropHintsAnnotation *cropHintsAnnotation;
+
+/**
+ * If set, represents the error message for the operation.
+ * Note that filled-in image annotations are guaranteed to be
+ * correct, even when `error` is set.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_Status *error;
+
+/** If present, face detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation *> *faceAnnotations;
+
+/**
+ * If present, text (OCR) detection or document (OCR) text detection has
+ * completed successfully.
+ * This annotation provides the structural hierarchy for the OCR detected
+ * text.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotation *fullTextAnnotation;
+
+/** If present, image properties were extracted successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1ImageProperties *imagePropertiesAnnotation;
+
+/** If present, label detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation *> *labelAnnotations;
+
+/** If present, landmark detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation *> *landmarkAnnotations;
+
+/**
+ * If present, localized object detection has completed successfully.
+ * This will be sorted descending by confidence score.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1LocalizedObjectAnnotation *> *localizedObjectAnnotations;
+
+/** If present, logo detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation *> *logoAnnotations;
+
+/** If present, product search has completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResults *productSearchResults;
+
+/** If present, safe-search annotation has completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation *safeSearchAnnotation;
+
+/** If present, text (OCR) detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation *> *textAnnotations;
+
+/** If present, web detection has completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1WebDetection *webDetection;
+
+@end
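+
+// Example (not part of the generated API surface): per the note on `error`
+// above, any annotations that are populated remain valid even when `error` is
+// set, so check both. A minimal sketch, assuming the shared Status model
+// exposes a `message` string:
+//
+//   GTLRVision_GoogleCloudVisionV1p2beta1AnnotateImageResponse *response = ...;
+//   if (response.error) {
+//     NSLog(@"partial failure: %@", response.error.message);
+//   }
+//   for (GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation *labelAnnotation
+//        in response.labelAnnotations) {
+//     NSLog(@"%@ (%.2f)", labelAnnotation.descriptionProperty,
+//           labelAnnotation.score.floatValue);
+//   }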
+
+
+/**
+ * The response for a single offline file annotation request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse : GTLRObject
+
+/** The output location and metadata from AsyncAnnotateFileRequest. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1OutputConfig *outputConfig;
+
+@end
+
+
+/**
+ * Response to an async batch file annotation request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1AsyncBatchAnnotateFilesResponse : GTLRObject
+
+/**
+ * The list of file annotation responses, one for each request in
+ * AsyncBatchAnnotateFilesRequest.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse *> *responses;
+
+@end
+
+
+/**
+ * Logical element on the page.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1Block : GTLRObject
+
+/**
+ * Detected block type (text, image, etc.) for this block.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Barcode
+ * Barcode block. (Value: "BARCODE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Picture
+ * Image block. (Value: "PICTURE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Ruler
+ * Horizontal/vertical line box. (Value: "RULER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Table Table
+ * block. (Value: "TABLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Text Regular
+ * text block. (Value: "TEXT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Unknown
+ * Unknown block type. (Value: "UNKNOWN")
+ */
+@property(nonatomic, copy, nullable) NSString *blockType;
+
+/**
+ * The bounding box for the block.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ *   * when the text is horizontal it might look like:
+ *      0----1
+ *      |    |
+ *      3----2
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *      2----3
+ *      |    |
+ *      1----0
+ *   and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results on the block. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** List of paragraphs in this block (if this block is of type text). */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1Paragraph *> *paragraphs;
+
+/** Additional information detected for the block. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationTextProperty *property;
+
+@end
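+
+// Example (not part of the generated API surface): a minimal sketch of
+// filtering blocks by the detected type, using the constants referenced above.
+//
+//   GTLRVision_GoogleCloudVisionV1p2beta1Block *block = ...;
+//   if ([block.blockType
+//           isEqual:kGTLRVision_GoogleCloudVisionV1p2beta1Block_BlockType_Text]) {
+//     NSLog(@"text block with %lu paragraph(s), confidence %.2f",
+//           (unsigned long)block.paragraphs.count, block.confidence.floatValue);
+//   }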
+
+
+/**
+ * A bounding polygon for the detected image annotation.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly : GTLRObject
+
+/** The bounding polygon normalized vertices. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1NormalizedVertex *> *normalizedVertices;
+
+/** The bounding polygon vertices. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1Vertex *> *vertices;
+
+@end
+
+
+/**
+ * Color information consists of RGB channels, score, and the fraction of
+ * the image that the color occupies in the image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1ColorInfo : GTLRObject
+
+/** RGB components of the color. */
+@property(nonatomic, strong, nullable) GTLRVision_Color *color;
+
+/**
+ * The fraction of pixels the color occupies in the image.
+ * Value in range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *pixelFraction;
+
+/**
+ * Image-specific score for this color. Value in range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
+
+
+/**
+ * Single crop hint that is used to generate a new crop when serving an image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1CropHint : GTLRObject
+
+/**
+ * The bounding polygon for the crop region. The coordinates of the bounding
+ * box are in the original image's scale.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *boundingPoly;
+
+/**
+ * Confidence of this being a salient region. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * Fraction of importance of this salient region with respect to the original
+ * image.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *importanceFraction;
+
+@end
+
+
+/**
+ * Set of crop hints that are used to generate new crops when serving images.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1CropHintsAnnotation : GTLRObject
+
+/** Crop hint results. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1CropHint *> *cropHints;
+
+@end
+
+
+/**
+ * Set of dominant colors and their corresponding scores.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1DominantColorsAnnotation : GTLRObject
+
+/** RGB color values with their score and pixel fraction. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1ColorInfo *> *colors;
+
+@end
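+
+// Example (not part of the generated API surface): a minimal sketch of picking
+// the most prominent color. Assumes `GTLRVision_Color` carries float RGB
+// components in [0, 1] (the shared google.type.Color model); the variable
+// names are hypothetical.
+//
+//   GTLRVision_GoogleCloudVisionV1p2beta1DominantColorsAnnotation *dominant = ...;
+//   GTLRVision_GoogleCloudVisionV1p2beta1ColorInfo *top = dominant.colors.firstObject;
+//   for (GTLRVision_GoogleCloudVisionV1p2beta1ColorInfo *info in dominant.colors) {
+//     if (info.score.floatValue > top.score.floatValue) top = info;
+//   }
+//   NSLog(@"dominant RGB: (%.2f, %.2f, %.2f) covering %.0f%% of pixels",
+//         top.color.red.floatValue, top.color.green.floatValue,
+//         top.color.blue.floatValue, top.pixelFraction.floatValue * 100);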
+
+
+/**
+ * Set of detected entity features.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation : GTLRObject
+
+/**
+ * Image region to which this entity belongs. Not produced
+ * for `LABEL_DETECTION` features.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *boundingPoly;
+
+/**
+ * **Deprecated. Use `score` instead.**
+ * The accuracy of the entity detection in an image.
+ * For example, for an image in which the "Eiffel Tower" entity is detected,
+ * this field represents the confidence that there is a tower in the query
+ * image. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * Entity textual description, expressed in its `locale` language.
+ *
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
+ */
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
+
+/**
+ * The language code for the locale in which the entity textual
+ * `description` is expressed.
+ */
+@property(nonatomic, copy, nullable) NSString *locale;
+
+/**
+ * The location information for the detected entity. Multiple
+ * `LocationInfo` elements can be present because one location may
+ * indicate the location of the scene in the image, and another location
+ * may indicate the location of the place where the image was taken.
+ * Location information is usually present for landmarks.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1LocationInfo *> *locations;
+
+/**
+ * Opaque entity ID. Some IDs may be available in
+ * [Google Knowledge Graph Search
+ * API](https://developers.google.com/knowledge-graph/).
+ */
+@property(nonatomic, copy, nullable) NSString *mid;
+
+/**
+ * Some entities may have optional user-supplied `Property` (name/value)
+ * fields, such as a score or string that qualifies the entity.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1Property *> *properties;
+
+/**
+ * Overall score of the result. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+/**
+ * The relevancy of the ICA (Image Content Annotation) label to the
+ * image. For example, the relevancy of "tower" is likely higher to an image
+ * containing the detected "Eiffel Tower" than to an image containing a
+ * detected distant towering building, even though the confidence that
+ * there is a tower in each image may be the same. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *topicality;
+
+@end
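+
+// Example (not part of the generated API surface): `score` is the field to
+// rank by (the `confidence` field above is deprecated). A minimal sketch of
+// sorting entity annotations descending by score; names are hypothetical.
+//
+//   NSArray<GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation *> *labels = ...;
+//   NSArray *sorted = [labels sortedArrayUsingComparator:
+//       ^NSComparisonResult(GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation *a,
+//                           GTLRVision_GoogleCloudVisionV1p2beta1EntityAnnotation *b) {
+//     return [b.score compare:a.score];  // descending
+//   }];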
+
+
+/**
+ * A face annotation object contains the results of face detection.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation : GTLRObject
+
+/**
+ * Anger likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_AngerLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *angerLikelihood;
+
+/**
+ * Blurred likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_BlurredLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *blurredLikelihood;
+
+/**
+ * The bounding polygon around the face. The coordinates of the bounding box
+ * are in the original image's scale.
+ * The bounding box is computed to "frame" the face in accordance with human
+ * expectations. It is based on the landmarker results.
+ * Note that one or more x and/or y coordinates may not be generated in the
+ * `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ * appears in the image to be annotated.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *boundingPoly;
+
+/**
+ * Detection confidence. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *detectionConfidence;
+
+/**
+ * The `fd_bounding_poly` bounding polygon is tighter than the
+ * `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ * is used to eliminate the face from any image analysis that detects the
+ * "amount of skin" visible in an image. It is not based on the
+ * landmarker results, only on the initial face detection, hence
+ * the <code>fd</code> (face detection) prefix.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *fdBoundingPoly;
+
+/**
+ * Headwear likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_HeadwearLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *headwearLikelihood;
+
+/**
+ * Joy likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *joyLikelihood;
+
+/**
+ * Face landmarking confidence. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *landmarkingConfidence;
+
+/** Detected face landmarks. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark *> *landmarks;
+
+/**
+ * Yaw angle, which indicates the leftward/rightward angle that the face is
+ * pointing relative to the vertical plane perpendicular to the image. Range
+ * [-180,180].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *panAngle;
+
+/**
+ * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
+ * of the face relative to the image vertical about the axis perpendicular to
+ * the face. Range [-180,180].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *rollAngle;
+
+/**
+ * Sorrow likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SorrowLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *sorrowLikelihood;
+
+/**
+ * Surprise likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_SurpriseLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *surpriseLikelihood;
+
+/**
+ * Pitch angle, which indicates the upwards/downwards angle that the face is
+ * pointing relative to the image's horizontal plane. Range [-180,180].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *tiltAngle;
+
+/**
+ * Under-exposed likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_UnderExposedLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *underExposedLikelihood;
+
+@end
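+
+// Example (not part of the generated API surface): a minimal sketch of reading
+// pose angles and one likelihood from a face annotation. The variable names
+// are hypothetical.
+//
+//   GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation *face = ...;
+//   NSLog(@"pan %.1f, tilt %.1f, roll %.1f (degrees, each in [-180, 180])",
+//         face.panAngle.floatValue, face.tiltAngle.floatValue,
+//         face.rollAngle.floatValue);
+//   BOOL probablyJoyful =
+//       [face.joyLikelihood isEqual:
+//           kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotation_JoyLikelihood_VeryLikely];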
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A face-specific landmark (for example, a face feature).
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark : GTLRObject
|
|
|
+
|
|
|
+/** Face landmark position. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1Position *position;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Face landmark type.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_ChinGnathion
|
|
|
+ * Chin gnathion. (Value: "CHIN_GNATHION")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_ChinLeftGonion
|
|
|
+ * Chin left gonion. (Value: "CHIN_LEFT_GONION")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_ChinRightGonion
|
|
|
+ * Chin right gonion. (Value: "CHIN_RIGHT_GONION")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_ForeheadGlabella
|
|
|
+ * Forehead glabella. (Value: "FOREHEAD_GLABELLA")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEarTragion
|
|
|
+ * Left ear tragion. (Value: "LEFT_EAR_TRAGION")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEye
|
|
|
+ * Left eye. (Value: "LEFT_EYE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyeBottomBoundary
|
|
|
+ * Left eye, bottom boundary. (Value: "LEFT_EYE_BOTTOM_BOUNDARY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyebrowUpperMidpoint
|
|
|
+ * Left eyebrow, upper midpoint. (Value: "LEFT_EYEBROW_UPPER_MIDPOINT")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyeLeftCorner
|
|
|
+ * Left eye, left corner. (Value: "LEFT_EYE_LEFT_CORNER")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyePupil
|
|
|
+ * Left eye pupil. (Value: "LEFT_EYE_PUPIL")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyeRightCorner
|
|
|
+ * Left eye, right corner. (Value: "LEFT_EYE_RIGHT_CORNER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftEyeTopBoundary
+ * Left eye, top boundary. (Value: "LEFT_EYE_TOP_BOUNDARY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftOfLeftEyebrow
+ * Left of left eyebrow. (Value: "LEFT_OF_LEFT_EYEBROW")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LeftOfRightEyebrow
+ * Left of right eyebrow. (Value: "LEFT_OF_RIGHT_EYEBROW")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_LowerLip
+ * Lower lip. (Value: "LOWER_LIP")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_MidpointBetweenEyes
+ * Midpoint between eyes. (Value: "MIDPOINT_BETWEEN_EYES")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_MouthCenter
+ * Mouth center. (Value: "MOUTH_CENTER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_MouthLeft
+ * Mouth left. (Value: "MOUTH_LEFT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_MouthRight
+ * Mouth right. (Value: "MOUTH_RIGHT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_NoseBottomCenter
+ * Nose, bottom center. (Value: "NOSE_BOTTOM_CENTER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_NoseBottomLeft
+ * Nose, bottom left. (Value: "NOSE_BOTTOM_LEFT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_NoseBottomRight
+ * Nose, bottom right. (Value: "NOSE_BOTTOM_RIGHT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_NoseTip
+ * Nose tip. (Value: "NOSE_TIP")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEarTragion
+ * Right ear tragion. (Value: "RIGHT_EAR_TRAGION")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEye
+ * Right eye. (Value: "RIGHT_EYE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyeBottomBoundary
+ * Right eye, bottom boundary. (Value: "RIGHT_EYE_BOTTOM_BOUNDARY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyebrowUpperMidpoint
+ * Right eyebrow, upper midpoint. (Value: "RIGHT_EYEBROW_UPPER_MIDPOINT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyeLeftCorner
+ * Right eye, left corner. (Value: "RIGHT_EYE_LEFT_CORNER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyePupil
+ * Right eye pupil. (Value: "RIGHT_EYE_PUPIL")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyeRightCorner
+ * Right eye, right corner. (Value: "RIGHT_EYE_RIGHT_CORNER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightEyeTopBoundary
+ * Right eye, top boundary. (Value: "RIGHT_EYE_TOP_BOUNDARY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightOfLeftEyebrow
+ * Right of left eyebrow. (Value: "RIGHT_OF_LEFT_EYEBROW")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_RightOfRightEyebrow
+ * Right of right eyebrow. (Value: "RIGHT_OF_RIGHT_EYEBROW")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_UnknownLandmark
+ * Unknown face landmark detected. Should not be filled. (Value:
+ * "UNKNOWN_LANDMARK")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1FaceAnnotationLandmark_Type_UpperLip
+ * Upper lip. (Value: "UPPER_LIP")
+ */
+@property(nonatomic, copy, nullable) NSString *type;
+
+@end
+
+
+/**
+ * The Google Cloud Storage location where the output will be written to.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1GcsDestination : GTLRObject
+
+/**
+ * Google Cloud Storage URI where the results will be stored. Results will
+ * be in JSON format and preceded by their corresponding input URIs. This field
+ * can either represent a single file, or a prefix for multiple outputs.
+ * Prefixes must end in a `/`.
+ * Examples:
+ * * File: gs://bucket-name/filename.json
+ * * Prefix: gs://bucket-name/prefix/here/
+ * * File: gs://bucket-name/prefix/here
+ * If multiple outputs, each response is still AnnotateFileResponse, each of
+ * which contains some subset of the full list of AnnotateImageResponse.
+ * Multiple outputs can happen if, for example, the output JSON is too large
+ * and overflows into multiple sharded files.
+ */
+@property(nonatomic, copy, nullable) NSString *uri;
+
+@end
+
+
+/**
+ * The Google Cloud Storage location where the input will be read from.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1GcsSource : GTLRObject
+
+/**
+ * Google Cloud Storage URI for the input file. This must only be a
+ * Google Cloud Storage object. Wildcards are not currently supported.
+ */
+@property(nonatomic, copy, nullable) NSString *uri;
+
+@end
+
+
+/**
+ * If an image was produced from a file (e.g. a PDF), this message gives
+ * information about the source of that image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1ImageAnnotationContext : GTLRObject
+
+/**
+ * If the file was a PDF or TIFF, this field gives the page number within
+ * the file used to produce the image.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *pageNumber;
+
+/** The URI of the file used to produce the image. */
+@property(nonatomic, copy, nullable) NSString *uri;
+
+@end
+
+
+/**
+ * Stores image properties, such as dominant colors.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1ImageProperties : GTLRObject
+
+/** If present, dominant colors completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1DominantColorsAnnotation *dominantColors;
+
+@end
+
+
+/**
+ * The desired input location and metadata.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1InputConfig : GTLRObject
+
+/** The Google Cloud Storage location to read the input from. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1GcsSource *gcsSource;
+
+/**
+ * The type of the file. Currently only "application/pdf" and "image/tiff"
+ * are supported. Wildcards are not supported.
+ */
+@property(nonatomic, copy, nullable) NSString *mimeType;
+
+@end
+
+
+/**
+ * Set of detected objects with bounding boxes.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1LocalizedObjectAnnotation : GTLRObject
+
+/** Image region to which this object belongs. This must be populated. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *boundingPoly;
+
+/**
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ * information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ */
+@property(nonatomic, copy, nullable) NSString *languageCode;
+
+/** Object ID that should align with EntityAnnotation mid. */
+@property(nonatomic, copy, nullable) NSString *mid;
+
+/** Object name, expressed in its `language_code` language. */
+@property(nonatomic, copy, nullable) NSString *name;
+
+/**
+ * Score of the result. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
+
+
+/**
+ * Detected entity location information.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1LocationInfo : GTLRObject
+
+/** lat/long location coordinates. */
+@property(nonatomic, strong, nullable) GTLRVision_LatLng *latLng;
+
+@end
+
+
+/**
+ * A vertex represents a 2D point in the image.
+ * NOTE: the normalized vertex coordinates are relative to the original image
+ * and range from 0 to 1.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1NormalizedVertex : GTLRObject
+
+/**
+ * X coordinate.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *x;
+
+/**
+ * Y coordinate.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *y;
+
+@end
+
+
+/**
+ * Contains metadata for the BatchAnnotateImages operation.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata : GTLRObject
+
+/** The time when the batch request was received. */
+@property(nonatomic, strong, nullable) GTLRDateTime *createTime;
+
+/**
+ * Current state of the batch operation.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Cancelled
+ * The batch processing was cancelled. (Value: "CANCELLED")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Created
+ * Request is received. (Value: "CREATED")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Done
+ * The batch processing is done. (Value: "DONE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_Running
+ * Request is actively being processed. (Value: "RUNNING")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1OperationMetadata_State_StateUnspecified
+ * Invalid. (Value: "STATE_UNSPECIFIED")
+ */
+@property(nonatomic, copy, nullable) NSString *state;
+
+/** The time when the operation result was last updated. */
+@property(nonatomic, strong, nullable) GTLRDateTime *updateTime;
+
+@end
+
+
+/**
+ * The desired output location and metadata.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1OutputConfig : GTLRObject
+
+/**
+ * The max number of response protos to put into each output JSON file on
+ * Google Cloud Storage.
+ * The valid range is [1, 100]. If not specified, the default value is 20.
+ * For example, for one pdf file with 100 pages, 100 response protos will
+ * be generated. If `batch_size` = 20, then 5 json files each
+ * containing 20 response protos will be written under the prefix
+ * `gcs_destination`.`uri`.
+ * Currently, batch_size only applies to GcsDestination, with potential future
+ * support for other output configurations.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *batchSize;
+
+/** The Google Cloud Storage location to write the output(s) to. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1GcsDestination *gcsDestination;
+
+@end
+
+
+/**
+ * Detected page from OCR.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1Page : GTLRObject
+
+/** List of blocks of text, images, etc. on this page. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1Block *> *blocks;
+
+/**
+ * Confidence of the OCR results on the page. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * Page height. For PDFs the unit is points. For images (including
+ * TIFFs) the unit is pixels.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *height;
+
+/** Additional information detected on the page. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationTextProperty *property;
+
+/**
+ * Page width. For PDFs the unit is points. For images (including
+ * TIFFs) the unit is pixels.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *width;
+
+@end
+
+
+/**
+ * Structural unit of text representing a number of words in a certain order.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1Paragraph : GTLRObject
+
+/**
+ * The bounding box for the paragraph.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ * * when the text is horizontal it might look like:
+ * 0----1
+ * | |
+ * 3----2
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
+ * 2----3
+ * | |
+ * 1----0
+ * and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results for the paragraph. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** Additional information detected for the paragraph. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationTextProperty *property;
+
+/** List of words in this paragraph. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1Word *> *words;
+
+@end
+
+
+/**
+ * A 3D position in the image, used primarily for Face detection landmarks.
+ * A valid Position must have both x and y coordinates.
+ * The position coordinates are in the same scale as the original image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1Position : GTLRObject
+
+/**
+ * X coordinate.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *x;
+
+/**
+ * Y coordinate.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *y;
+
+/**
+ * Z coordinate (or depth).
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *z;
+
+@end
+
+
+/**
+ * A Product contains ReferenceImages.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1Product : GTLRObject
+
+/**
+ * User-provided metadata to be stored with this product. Must be at most 4096
+ * characters long.
+ *
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
+ */
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
+
+/**
+ * The user-provided name for this Product. Must not be empty. Must be at most
+ * 4096 characters long.
+ */
+@property(nonatomic, copy, nullable) NSString *displayName;
+
+/**
+ * The resource name of the product.
+ * Format is:
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
+ * This field is ignored when creating a product.
+ */
+@property(nonatomic, copy, nullable) NSString *name;
+
+/**
+ * The category for the product identified by the reference image. This should
+ * be either "homegoods", "apparel", or "toys".
+ * This field is immutable.
+ */
+@property(nonatomic, copy, nullable) NSString *productCategory;
+
+/**
+ * Key-value pairs that can be attached to a product. At query time,
+ * constraints can be specified based on the product_labels.
+ * Note that integer values can be provided as strings, e.g. "1199". Only
+ * strings with integer values can match a range-based restriction which is
+ * to be supported soon.
+ * Multiple values can be assigned to the same key. One product may have up to
+ * 100 product_labels.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1ProductKeyValue *> *productLabels;
+
+@end
+
+
+/**
+ * A product label represented as a key-value pair.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1ProductKeyValue : GTLRObject
+
+/**
+ * The key of the label attached to the product. Cannot be empty and cannot
+ * exceed 128 bytes.
+ */
+@property(nonatomic, copy, nullable) NSString *key;
+
+/**
+ * The value of the label attached to the product. Cannot be empty and
+ * cannot exceed 128 bytes.
+ */
+@property(nonatomic, copy, nullable) NSString *value;
+
+@end
+
+
+/**
+ * Results for a product search request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResults : GTLRObject
+
+/**
+ * Timestamp of the index which provided these results. Changes made after
+ * this time are not reflected in the current results.
+ */
+@property(nonatomic, strong, nullable) GTLRDateTime *indexTime;
+
+/**
+ * List of results grouped by products detected in the query image. Each entry
+ * corresponds to one bounding polygon in the query image, and contains the
+ * matching products specific to that region. There may be duplicate product
+ * matches in the union of all the per-product results.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResultsGroupedResult *> *productGroupedResults;
+
+/** List of results, one for each product match. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResultsResult *> *results;
+
+@end
+
+
+/**
+ * Information about the products similar to a single product in a query
+ * image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResultsGroupedResult : GTLRObject
+
+/** The bounding polygon around the product detected in the query image. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *boundingPoly;
+
+/** List of results, one for each product match. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResultsResult *> *results;
+
+@end
+
+
+/**
+ * Information about a product.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1ProductSearchResultsResult : GTLRObject
+
+/**
+ * The resource name of the image from the product that is the closest match
+ * to the query.
+ */
+@property(nonatomic, copy, nullable) NSString *image;
+
+/** The Product. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1Product *product;
+
+/**
+ * A confidence level on the match, ranging from 0 (no confidence) to
+ * 1 (full confidence).
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
+
+
+/**
+ * A `Property` consists of a user-supplied name/value pair.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1Property : GTLRObject
+
+/** Name of the property. */
+@property(nonatomic, copy, nullable) NSString *name;
+
+/**
+ * Value of numeric properties.
+ *
+ * Uses NSNumber of unsignedLongLongValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *uint64Value;
+
+/** Value of the property. */
+@property(nonatomic, copy, nullable) NSString *value;
+
+@end
+
+
+/**
+ * Set of features pertaining to the image, computed by computer vision
+ * methods over safe-search verticals (for example, adult, spoof, medical,
+ * violence).
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation : GTLRObject
+
+/**
+ * Represents the adult content likelihood for the image. Adult content may
+ * contain elements such as nudity, pornographic images or cartoons, or
+ * sexual activities.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Adult_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *adult;
+
+/**
+ * Likelihood that this is a medical image.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Medical_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *medical;
+
+/**
+ * Likelihood that the request image contains racy content. Racy content may
+ * include (but is not limited to) skimpy or sheer clothing, strategically
+ * covered nudity, lewd or provocative poses, or close-ups of sensitive
+ * body areas.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Racy_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *racy;
+
+/**
+ * Spoof likelihood. The likelihood that a modification
+ * was made to the image's canonical version to make it appear
+ * funny or offensive.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Spoof_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *spoof;
+
+/**
+ * Likelihood that this image contains violent content.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1SafeSearchAnnotation_Violence_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *violence;
+
+@end
+
+
+/**
+ * A single symbol representation.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1Symbol : GTLRObject
+
+/**
+ * The bounding box for the symbol.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ * * when the text is horizontal it might look like:
+ * 0----1
+ * | |
+ * 3----2
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
+ * 2----3
+ * | |
+ * 1----0
+ * and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results for the symbol. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** Additional information detected for the symbol. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationTextProperty *property;
+
+/** The actual UTF-8 representation of the symbol. */
+@property(nonatomic, copy, nullable) NSString *text;
+
+@end
+
+
+/**
+ * TextAnnotation contains a structured representation of OCR extracted text.
+ * The hierarchy of an OCR extracted text structure is like this:
+ * TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+ * Each structural component, starting from Page, may further have its own
+ * properties. Properties describe detected languages, breaks, etc. Please
+ * refer to the TextAnnotation.TextProperty message definition below for
+ * more detail.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotation : GTLRObject
+
+/** List of pages detected by OCR. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1Page *> *pages;
+
+/** UTF-8 text detected on the pages. */
+@property(nonatomic, copy, nullable) NSString *text;
+
+@end
+
+
+/**
+ * Detected start or end of a structural component.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak : GTLRObject
+
+/**
+ * True if break prepends the element.
+ *
+ * Uses NSNumber of boolValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *isPrefix;
+
+/**
+ * Detected break type.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_EolSureSpace
+ * Line-wrapping break. (Value: "EOL_SURE_SPACE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_Hyphen
+ * End-line hyphen that is not present in text; does not co-occur with
+ * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`. (Value: "HYPHEN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_LineBreak
+ * Line break that ends a paragraph. (Value: "LINE_BREAK")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_Space
+ * Regular space. (Value: "SPACE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_SureSpace
+ * Sure space (very wide). (Value: "SURE_SPACE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak_Type_Unknown
+ * Unknown break label type. (Value: "UNKNOWN")
+ */
+@property(nonatomic, copy, nullable) NSString *type;
+
+@end
+
+
+/**
+ * Detected language for a structural component.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage : GTLRObject
+
+/**
+ * Confidence of detected language. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+ * information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ */
+@property(nonatomic, copy, nullable) NSString *languageCode;
+
+@end
+
+
+/**
+ * Additional information detected on the structural component.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationTextProperty : GTLRObject
+
+/** Detected start or end of a text segment. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak *detectedBreak;
+
+/** A list of detected languages together with confidence. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage *> *detectedLanguages;
+
+@end
+
+
+/**
+ * A vertex represents a 2D point in the image.
+ * NOTE: the vertex coordinates are in the same scale as the original image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1Vertex : GTLRObject
+
+/**
+ * X coordinate.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *x;
+
+/**
+ * Y coordinate.
+ *
+ * Uses NSNumber of intValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *y;
+
+@end
+
+
+/**
+ * Relevant information for the image from the Internet.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1WebDetection : GTLRObject
+
+/**
+ * The service's best guess as to the topic of the request image.
+ * Inferred from similar images on the open web.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebLabel *> *bestGuessLabels;
+
+/**
+ * Fully matching images from the Internet.
+ * Can include resized copies of the query image.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebImage *> *fullMatchingImages;
+
+/** Web pages containing the matching images from the Internet. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebPage *> *pagesWithMatchingImages;
+
+/**
+ * Partial matching images from the Internet.
+ * Those images are similar enough to share some key-point features. For
+ * example an original image will likely have partial matching for its crops.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebImage *> *partialMatchingImages;
+
+/** The visually similar image results. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebImage *> *visuallySimilarImages;
+
+/** Deduced entities from similar images on the Internet. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebEntity *> *webEntities;
+
+@end
+
+
+/**
+ * Entity deduced from similar images on the Internet.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebEntity : GTLRObject
+
+/**
+ * Canonical description of the entity, in English.
+ *
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
+ */
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
+
+/** Opaque entity ID. */
+@property(nonatomic, copy, nullable) NSString *entityId;
+
+/**
+ * Overall relevancy score for the entity.
+ * Not normalized and not comparable across different image queries.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
+
+
+/**
+ * Metadata for online images.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebImage : GTLRObject
+
+/**
+ * (Deprecated) Overall relevancy score for the image.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+/** The result image URL. */
+@property(nonatomic, copy, nullable) NSString *url;
+
+@end
+
+
+/**
+ * Label to provide extra metadata for the web detection.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebLabel : GTLRObject
+
+/** Label for extra metadata. */
+@property(nonatomic, copy, nullable) NSString *label;
+
+/**
+ * The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ * For more information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ */
+@property(nonatomic, copy, nullable) NSString *languageCode;
+
+@end
+
+
+/**
+ * Metadata for web pages.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebPage : GTLRObject
+
+/**
+ * Fully matching images on the page.
+ * Can include resized copies of the query image.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebImage *> *fullMatchingImages;
+
+/** Title for the web page; may contain HTML markup. */
+@property(nonatomic, copy, nullable) NSString *pageTitle;
+
+/**
+ * Partial matching images on the page.
+ * Those images are similar enough to share some key-point features. For
+ * example an original image will likely have partial matching for its
+ * crops.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1WebDetectionWebImage *> *partialMatchingImages;
+
+/**
+ * (Deprecated) Overall relevancy score for the web page.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+/** The result web page URL. */
+@property(nonatomic, copy, nullable) NSString *url;
+
+@end
+
+
+/**
+ * A word representation.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p2beta1Word : GTLRObject
+
+/**
+ * The bounding box for the word.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ * * when the text is horizontal it might look like:
+ * 0----1
+ * | |
+ * 3----2
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
+ * 2----3
+ * | |
+ * 1----0
+ * and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results for the word. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** Additional information detected for the word. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p2beta1TextAnnotationTextProperty *property;
+
+/**
+ * List of symbols in the word.
+ * The order of the symbols follows the natural reading order.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p2beta1Symbol *> *symbols;
+
+@end
+
+
+/**
+ * Response to a single file annotation request. A file may contain one or more
+ * images, which individually have their own responses.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1AnnotateFileResponse : GTLRObject
+
+/** Information about the file for which this response is generated. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1InputConfig *inputConfig;
+
+/** Individual responses to images found within the file. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1AnnotateImageResponse *> *responses;
+
+@end
+
+
+/**
+ * Response to an image annotation request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1AnnotateImageResponse : GTLRObject
+
+/**
+ * If present, contextual information is needed to understand where this image
+ * comes from.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1ImageAnnotationContext *context;
+
+/** If present, crop hints have completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1CropHintsAnnotation *cropHintsAnnotation;
+
+/**
+ * If set, represents the error message for the operation.
+ * Note that filled-in image annotations are guaranteed to be
+ * correct, even when `error` is set.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_Status *error;
+
+/** If present, face detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation *> *faceAnnotations;
+
+/**
+ * If present, text (OCR) detection or document (OCR) text detection has
+ * completed successfully.
+ * This annotation provides the structural hierarchy for the OCR detected
+ * text.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotation *fullTextAnnotation;
+
+/** If present, image properties were extracted successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1ImageProperties *imagePropertiesAnnotation;
+
+/** If present, label detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1EntityAnnotation *> *labelAnnotations;
+
+/** If present, landmark detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1EntityAnnotation *> *landmarkAnnotations;
+
+/**
+ * If present, localized object detection has completed successfully.
+ * This will be sorted descending by confidence score.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1LocalizedObjectAnnotation *> *localizedObjectAnnotations;
+
+/** If present, logo detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1EntityAnnotation *> *logoAnnotations;
+
+/** If present, product search has completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResults *productSearchResults;
+
+/** If present, safe-search annotation has completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation *safeSearchAnnotation;
+
+/** If present, text (OCR) detection has completed successfully. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1EntityAnnotation *> *textAnnotations;
+
+/** If present, web detection has completed successfully. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1WebDetection *webDetection;
+
+@end
+
+
+/**
+ * The response for a single offline file annotation request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1AsyncAnnotateFileResponse : GTLRObject
+
+/** The output location and metadata from AsyncAnnotateFileRequest. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1OutputConfig *outputConfig;
+
+@end
+
+
+/**
+ * Response to an async batch file annotation request.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1AsyncBatchAnnotateFilesResponse : GTLRObject
+
+/**
+ * The list of file annotation responses, one for each request in
+ * AsyncBatchAnnotateFilesRequest.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1AsyncAnnotateFileResponse *> *responses;
+
+@end
+
+
+/**
+ * Metadata for the batch operations such as the current state.
+ * This is included in the `metadata` field of the `Operation` returned by the
+ * `GetOperation` call of the `google::longrunning::Operations` service.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata : GTLRObject
+
+/**
+ * The time when the batch request is finished and
+ * google.longrunning.Operation.done is set to true.
+ */
+@property(nonatomic, strong, nullable) GTLRDateTime *endTime;
+
+/**
+ * The current state of the batch operation.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_Cancelled
+ * The request is done after the longrunning.Operations.CancelOperation
+ * has been called by the user. Any records that were processed before
+ * the cancel command are output as specified in the request. (Value:
+ * "CANCELLED")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_Failed
+ * The request is done and no item has been successfully processed.
+ * (Value: "FAILED")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_Processing
+ * Request is actively being processed. (Value: "PROCESSING")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_StateUnspecified
+ * Invalid. (Value: "STATE_UNSPECIFIED")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1BatchOperationMetadata_State_Successful
+ * The request is done and at least one item has been successfully
+ * processed. (Value: "SUCCESSFUL")
+ */
+@property(nonatomic, copy, nullable) NSString *state;
+
+/** The time when the batch request was submitted to the server. */
+@property(nonatomic, strong, nullable) GTLRDateTime *submitTime;
+
+@end
+
+
+/**
+ * Logical element on the page.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1Block : GTLRObject
+
+/**
+ * Detected block type (text, image, etc.) for this block.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Barcode
+ * Barcode block. (Value: "BARCODE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Picture
+ * Image block. (Value: "PICTURE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Ruler
+ * Horizontal/vertical line box. (Value: "RULER")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Table Table
+ * block. (Value: "TABLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Text Regular
+ * text block. (Value: "TEXT")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1Block_BlockType_Unknown
+ * Unknown block type. (Value: "UNKNOWN")
+ */
+@property(nonatomic, copy, nullable) NSString *blockType;
+
+/**
+ * The bounding box for the block.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ * * when the text is horizontal it might look like:
+ * 0----1
+ * | |
+ * 3----2
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
+ * 2----3
+ * | |
+ * 1----0
+ * and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results on the block. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** List of paragraphs in this block (if this block is of type text). */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1Paragraph *> *paragraphs;
+
+/** Additional information detected for the block. */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationTextProperty *property;
+
+@end
+
+
+/**
+ * A bounding polygon for the detected image annotation.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly : GTLRObject
+
+/** The bounding polygon normalized vertices. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1NormalizedVertex *> *normalizedVertices;
+
+/** The bounding polygon vertices. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1Vertex *> *vertices;
+
+@end
+
+
+/**
+ * Color information consists of RGB channels, score, and the fraction of
+ * the image that the color occupies in the image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1ColorInfo : GTLRObject
+
+/** RGB components of the color. */
+@property(nonatomic, strong, nullable) GTLRVision_Color *color;
+
+/**
+ * The fraction of pixels the color occupies in the image.
+ * Value in range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *pixelFraction;
+
+/**
+ * Image-specific score for this color. Value in range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
+
+
+/**
+ * Single crop hint that is used to generate a new crop when serving an image.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1CropHint : GTLRObject
+
+/**
+ * The bounding polygon for the crop region. The coordinates of the bounding
+ * box are in the original image's scale.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *boundingPoly;
+
+/**
+ * Confidence of this being a salient region. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * Fraction of importance of this salient region with respect to the original
+ * image.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *importanceFraction;
+
+@end
+
+
+/**
+ * Set of crop hints that are used to generate new crops when serving images.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1CropHintsAnnotation : GTLRObject
+
+/** Crop hint results. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1CropHint *> *cropHints;
+
+@end
+
+
+/**
+ * Set of dominant colors and their corresponding scores.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1DominantColorsAnnotation : GTLRObject
+
+/** RGB color values with their score and pixel fraction. */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1ColorInfo *> *colors;
+
+@end
+
+
+/**
+ * Set of detected entity features.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1EntityAnnotation : GTLRObject
+
+/**
+ * Image region to which this entity belongs. Not produced
+ * for `LABEL_DETECTION` features.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *boundingPoly;
+
+/**
+ * **Deprecated. Use `score` instead.**
+ * The accuracy of the entity detection in an image.
+ * For example, for an image in which the "Eiffel Tower" entity is detected,
+ * this field represents the confidence that there is a tower in the query
+ * image. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/**
+ * Entity textual description, expressed in its `locale` language.
+ *
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
+ */
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
+
+/**
+ * The language code for the locale in which the entity textual
+ * `description` is expressed.
+ */
+@property(nonatomic, copy, nullable) NSString *locale;
+
+/**
+ * The location information for the detected entity. Multiple
+ * `LocationInfo` elements can be present because one location may
+ * indicate the location of the scene in the image, and another location
+ * may indicate the location of the place where the image was taken.
+ * Location information is usually present for landmarks.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1LocationInfo *> *locations;
+
+/**
+ * Opaque entity ID. Some IDs may be available in
+ * [Google Knowledge Graph Search
+ * API](https://developers.google.com/knowledge-graph/).
+ */
+@property(nonatomic, copy, nullable) NSString *mid;
+
+/**
+ * Some entities may have optional user-supplied `Property` (name/value)
+ * fields, such as a score or string that qualifies the entity.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1Property *> *properties;
+
+/**
+ * Overall score of the result. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+/**
+ * The relevancy of the ICA (Image Content Annotation) label to the
+ * image. For example, the relevancy of "tower" is likely higher to an image
+ * containing the detected "Eiffel Tower" than to an image containing a
+ * detected distant towering building, even though the confidence that
+ * there is a tower in each image may be the same. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *topicality;
+
+@end
+
+
+/**
+ * A face annotation object contains the results of face detection.
+ */
+@interface GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation : GTLRObject
+
+/**
+ * Anger likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_AngerLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *angerLikelihood;
+
+/**
+ * Blurred likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_BlurredLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *blurredLikelihood;
+
+/**
+ * The bounding polygon around the face. The coordinates of the bounding box
+ * are in the original image's scale.
+ * The bounding box is computed to "frame" the face in accordance with human
+ * expectations. It is based on the landmarker results.
+ * Note that one or more x and/or y coordinates may not be generated in the
+ * `BoundingPoly` (the polygon will be unbounded) if only a partial face
+ * appears in the image to be annotated.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *boundingPoly;
+
+/**
+ * Detection confidence. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *detectionConfidence;
+
+/**
+ * The `fd_bounding_poly` bounding polygon is tighter than the
+ * `boundingPoly`, and encloses only the skin part of the face. Typically, it
+ * is used to eliminate the face from any image analysis that detects the
+ * "amount of skin" visible in an image. It is not based on the
+ * landmarker results, only on the initial face detection, hence
+ * the <code>fd</code> (face detection) prefix.
+ */
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *fdBoundingPoly;
+
+/**
+ * Headwear likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_HeadwearLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *headwearLikelihood;
+
+/**
+ * Joy likelihood.
+ *
+ * Likely values:
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_Likely
+ * It is likely that the image belongs to the specified vertical. (Value:
+ * "LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_Possible
+ * It is possible that the image belongs to the specified vertical.
+ * (Value: "POSSIBLE")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_Unknown
+ * Unknown likelihood. (Value: "UNKNOWN")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_Unlikely
+ * It is unlikely that the image belongs to the specified vertical.
+ * (Value: "UNLIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_VeryLikely
+ * It is very likely that the image belongs to the specified vertical.
+ * (Value: "VERY_LIKELY")
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_VeryUnlikely
+ * It is very unlikely that the image belongs to the specified vertical.
+ * (Value: "VERY_UNLIKELY")
+ */
+@property(nonatomic, copy, nullable) NSString *joyLikelihood;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Face landmarking confidence. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *landmarkingConfidence;
|
|
|
+
|
|
|
+/** Detected face landmarks. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark *> *landmarks;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Yaw angle, which indicates the leftward/rightward angle that the face is
|
|
|
+ * pointing relative to the vertical plane perpendicular to the image. Range
|
|
|
+ * [-180,180].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *panAngle;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation
|
|
|
+ * of the face relative to the image vertical about the axis perpendicular to
|
|
|
+ * the face. Range [-180,180].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *rollAngle;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Sorrow likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_Likely
|
|
|
+ * It is likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_Possible
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ * (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_Unknown
|
|
|
+ * Unknown likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_Unlikely
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_VeryLikely
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SorrowLikelihood_VeryUnlikely
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *sorrowLikelihood;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Surprise likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_Likely
|
|
|
+ * It is likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_Possible
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ * (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_Unknown
|
|
|
+ * Unknown likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_Unlikely
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_VeryLikely
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_SurpriseLikelihood_VeryUnlikely
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *surpriseLikelihood;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Pitch angle, which indicates the upwards/downwards angle that the face is
|
|
|
+ * pointing relative to the image's horizontal plane. Range [-180,180].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *tiltAngle;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Under-exposed likelihood.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_Likely
|
|
|
+ * It is likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_Possible
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ * (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_Unknown
|
|
|
+ * Unknown likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_Unlikely
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_VeryLikely
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_UnderExposedLikelihood_VeryUnlikely
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *underExposedLikelihood;
|
|
|
+
|
|
|
+@end
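+
+// A minimal usage sketch (illustrative only, excluded from compilation). It
+// shows how the pose angles and likelihood string constants declared above
+// are typically read; the `face` parameter is assumed to come from a
+// response's faceAnnotations array, and the helper name is hypothetical.
+#if 0
+static void LogFacePose(GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation *face) {
+  // Likelihoods are strings; compare against the generated constants.
+  BOOL joyful =
+      [face.joyLikelihood isEqualToString:kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_VeryLikely] ||
+      [face.joyLikelihood isEqualToString:kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotation_JoyLikelihood_Likely];
+  NSLog(@"Face (confidence %.2f): pan %.1f, tilt %.1f, roll %.1f degrees, joyful: %@",
+        face.detectionConfidence.floatValue, face.panAngle.floatValue,
+        face.tiltAngle.floatValue, face.rollAngle.floatValue,
+        joyful ? @"YES" : @"NO");
+}
+#endif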
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A face-specific landmark (for example, a face feature).
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark : GTLRObject
|
|
|
+
|
|
|
+/** Face landmark position. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1Position *position;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Face landmark type.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_ChinGnathion
|
|
|
+ * Chin gnathion. (Value: "CHIN_GNATHION")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_ChinLeftGonion
|
|
|
+ * Chin left gonion. (Value: "CHIN_LEFT_GONION")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_ChinRightGonion
|
|
|
+ * Chin right gonion. (Value: "CHIN_RIGHT_GONION")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_ForeheadGlabella
|
|
|
+ * Forehead glabella. (Value: "FOREHEAD_GLABELLA")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEarTragion
|
|
|
+ * Left ear tragion. (Value: "LEFT_EAR_TRAGION")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEye
|
|
|
+ * Left eye. (Value: "LEFT_EYE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyeBottomBoundary
|
|
|
+ * Left eye, bottom boundary. (Value: "LEFT_EYE_BOTTOM_BOUNDARY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyebrowUpperMidpoint
|
|
|
+ * Left eyebrow, upper midpoint. (Value: "LEFT_EYEBROW_UPPER_MIDPOINT")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyeLeftCorner
|
|
|
+ * Left eye, left corner. (Value: "LEFT_EYE_LEFT_CORNER")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyePupil
|
|
|
+ * Left eye pupil. (Value: "LEFT_EYE_PUPIL")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyeRightCorner
|
|
|
+ * Left eye, right corner. (Value: "LEFT_EYE_RIGHT_CORNER")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftEyeTopBoundary
|
|
|
+ * Left eye, top boundary. (Value: "LEFT_EYE_TOP_BOUNDARY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftOfLeftEyebrow
|
|
|
+ * Left of left eyebrow. (Value: "LEFT_OF_LEFT_EYEBROW")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LeftOfRightEyebrow
|
|
|
+ * Left of right eyebrow. (Value: "LEFT_OF_RIGHT_EYEBROW")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_LowerLip
|
|
|
+ * Lower lip. (Value: "LOWER_LIP")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_MidpointBetweenEyes
|
|
|
+ * Midpoint between eyes. (Value: "MIDPOINT_BETWEEN_EYES")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_MouthCenter
|
|
|
+ * Mouth center. (Value: "MOUTH_CENTER")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_MouthLeft
|
|
|
+ * Mouth left. (Value: "MOUTH_LEFT")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_MouthRight
|
|
|
+ * Mouth right. (Value: "MOUTH_RIGHT")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_NoseBottomCenter
|
|
|
+ * Nose, bottom center. (Value: "NOSE_BOTTOM_CENTER")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_NoseBottomLeft
|
|
|
+ * Nose, bottom left. (Value: "NOSE_BOTTOM_LEFT")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_NoseBottomRight
|
|
|
+ * Nose, bottom right. (Value: "NOSE_BOTTOM_RIGHT")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_NoseTip
|
|
|
+ * Nose tip. (Value: "NOSE_TIP")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEarTragion
|
|
|
+ * Right ear tragion. (Value: "RIGHT_EAR_TRAGION")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEye
|
|
|
+ * Right eye. (Value: "RIGHT_EYE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyeBottomBoundary
|
|
|
+ * Right eye, bottom boundary. (Value: "RIGHT_EYE_BOTTOM_BOUNDARY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyebrowUpperMidpoint
|
|
|
+ * Right eyebrow, upper midpoint. (Value: "RIGHT_EYEBROW_UPPER_MIDPOINT")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyeLeftCorner
|
|
|
+ * Right eye, left corner. (Value: "RIGHT_EYE_LEFT_CORNER")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyePupil
|
|
|
+ * Right eye pupil. (Value: "RIGHT_EYE_PUPIL")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyeRightCorner
|
|
|
+ * Right eye, right corner. (Value: "RIGHT_EYE_RIGHT_CORNER")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightEyeTopBoundary
|
|
|
+ * Right eye, top boundary. (Value: "RIGHT_EYE_TOP_BOUNDARY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightOfLeftEyebrow
|
|
|
+ * Right of left eyebrow. (Value: "RIGHT_OF_LEFT_EYEBROW")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_RightOfRightEyebrow
|
|
|
+ * Right of right eyebrow. (Value: "RIGHT_OF_RIGHT_EYEBROW")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_UnknownLandmark
|
|
|
+ * Unknown face landmark detected. Should not be filled. (Value:
|
|
|
+ * "UNKNOWN_LANDMARK")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1FaceAnnotationLandmark_Type_UpperLip
|
|
|
+ * Upper lip. (Value: "UPPER_LIP")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *type;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The Google Cloud Storage location where the output will be written to.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1GcsDestination : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Google Cloud Storage URI where the results will be stored. Results will
|
|
|
+ * be in JSON format and preceded by their corresponding input URIs. This field
|
|
|
+ * can either represent a single file, or a prefix for multiple outputs.
|
|
|
+ * Prefixes must end in a `/`.
|
|
|
+ * Examples:
|
|
|
+ * * File: gs://bucket-name/filename.json
|
|
|
+ * * Prefix: gs://bucket-name/prefix/here/
|
|
|
+ * * File: gs://bucket-name/prefix/here
|
|
|
+ * If there are multiple outputs, each response is still an AnnotateFileResponse, each of
|
|
|
+ * which contains some subset of the full list of AnnotateImageResponse.
|
|
|
+ * Multiple outputs can happen if, for example, the output JSON is too large
|
|
|
+ * and overflows into multiple sharded files.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *uri;
|
|
|
+
|
|
|
+@end
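+
+// A minimal sketch (illustrative only, excluded from compilation) of the
+// file-vs-prefix distinction documented above: a URI ending in `/` is
+// treated as a prefix under which sharded output files are written. The
+// helper name and bucket path are placeholders.
+#if 0
+static GTLRVision_GoogleCloudVisionV1p3beta1GcsDestination *MakeOutputDestination(void) {
+  GTLRVision_GoogleCloudVisionV1p3beta1GcsDestination *dest =
+      [GTLRVision_GoogleCloudVisionV1p3beta1GcsDestination object];
+  // Trailing slash => prefix; results may be sharded across several JSON files.
+  dest.uri = @"gs://bucket-name/prefix/here/";
+  return dest;
+}
+#endif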
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The Google Cloud Storage location where the input will be read from.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1GcsSource : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Google Cloud Storage URI for the input file. This must only be a
|
|
|
+ * Google Cloud Storage object. Wildcards are not currently supported.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *uri;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * If an image was produced from a file (e.g. a PDF), this message gives
|
|
|
+ * information about the source of that image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1ImageAnnotationContext : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * If the file was a PDF or TIFF, this field gives the page number within
|
|
|
+ * the file used to produce the image.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *pageNumber;
|
|
|
+
|
|
|
+/** The URI of the file used to produce the image. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *uri;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Stores image properties, such as dominant colors.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1ImageProperties : GTLRObject
|
|
|
+
|
|
|
+/** If present, dominant colors completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1DominantColorsAnnotation *dominantColors;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response message for the `ImportProductSets` method.
|
|
|
+ * This message is returned by the
|
|
|
+ * google.longrunning.Operations.GetOperation method in the returned
|
|
|
+ * google.longrunning.Operation.response field.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1ImportProductSetsResponse : GTLRObject
|
|
|
+
|
|
|
+/** The list of reference_images that are imported successfully. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1ReferenceImage *> *referenceImages;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The rpc status for each ImportProductSet request, including both successes
|
|
|
+ * and errors.
|
|
|
+ * The number of statuses here matches the number of lines in the csv file,
|
|
|
+ * and statuses[i] stores the success or failure status of processing the i-th
|
|
|
+ * line of the csv, starting from line 0.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Status *> *statuses;
|
|
|
+
|
|
|
+@end
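+
+// A minimal sketch (illustrative only, excluded from compilation) of the
+// index correspondence documented above: statuses[i] reports on line i of
+// the input CSV. GTLRVision_Status's `code`/`message` fields are assumed
+// from the Status class declared elsewhere in this header.
+#if 0
+static void LogFailedCsvLines(GTLRVision_GoogleCloudVisionV1p3beta1ImportProductSetsResponse *response) {
+  [response.statuses enumerateObjectsUsingBlock:^(GTLRVision_Status *status, NSUInteger idx, BOOL *stop) {
+    if (status.code.intValue != 0) {  // Non-zero code => that CSV line failed.
+      NSLog(@"CSV line %lu failed: %@", (unsigned long)idx, status.message);
+    }
+  }];
+}
+#endif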
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The desired input location and metadata.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1InputConfig : GTLRObject
|
|
|
+
|
|
|
+/** The Google Cloud Storage location to read the input from. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1GcsSource *gcsSource;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The type of the file. Currently only "application/pdf" and "image/tiff"
|
|
|
+ * are supported. Wildcards are not supported.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *mimeType;
|
|
|
+
|
|
|
+@end
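+
+// A minimal sketch (illustrative only, excluded from compilation) pairing a
+// GcsSource with one of the two supported MIME types documented above; the
+// helper name and object path are placeholders.
+#if 0
+static GTLRVision_GoogleCloudVisionV1p3beta1InputConfig *MakePdfInputConfig(void) {
+  GTLRVision_GoogleCloudVisionV1p3beta1GcsSource *source =
+      [GTLRVision_GoogleCloudVisionV1p3beta1GcsSource object];
+  source.uri = @"gs://bucket-name/document.pdf";  // Must be a single GCS object; no wildcards.
+  GTLRVision_GoogleCloudVisionV1p3beta1InputConfig *input =
+      [GTLRVision_GoogleCloudVisionV1p3beta1InputConfig object];
+  input.gcsSource = source;
+  input.mimeType = @"application/pdf";  // Only "application/pdf" and "image/tiff" are supported.
+  return input;
+}
+#endif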
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Set of detected objects with bounding boxes.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1LocalizedObjectAnnotation : GTLRObject
|
|
|
+
|
|
|
+/** Image region to which this object belongs. This must be populated. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *boundingPoly;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
|
|
|
+ * information, see
|
|
|
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *languageCode;
|
|
|
+
|
|
|
+/** Object ID that should align with EntityAnnotation mid. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *mid;
|
|
|
+
|
|
|
+/** Object name, expressed in its `language_code` language. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Score of the result. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *score;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected entity location information.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1LocationInfo : GTLRObject
|
|
|
+
|
|
|
+/** lat/long location coordinates. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_LatLng *latLng;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A vertex represents a 2D point in the image.
|
|
|
+ * NOTE: the normalized vertex coordinates are relative to the original image
|
|
|
+ * and range from 0 to 1.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1NormalizedVertex : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * X coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *x;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Y coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *y;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Contains metadata for the BatchAnnotateImages operation.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata : GTLRObject
|
|
|
+
|
|
|
+/** The time when the batch request was received. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRDateTime *createTime;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Current state of the batch operation.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_Cancelled
|
|
|
+ * The batch processing was cancelled. (Value: "CANCELLED")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_Created
|
|
|
+ * Request is received. (Value: "CREATED")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_Done
|
|
|
+ * The batch processing is done. (Value: "DONE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_Running
|
|
|
+ * Request is actively being processed. (Value: "RUNNING")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1OperationMetadata_State_StateUnspecified
|
|
|
+ * Invalid. (Value: "STATE_UNSPECIFIED")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *state;
|
|
|
+
|
|
|
+/** The time when the operation result was last updated. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRDateTime *updateTime;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The desired output location and metadata.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1OutputConfig : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The max number of response protos to put into each output JSON file on
|
|
|
+ * Google Cloud Storage.
|
|
|
+ * The valid range is [1, 100]. If not specified, the default value is 20.
|
|
|
+ * For example, for one PDF file with 100 pages, 100 response protos will
|
|
|
+ * be generated. If `batch_size` = 20, then 5 json files each
|
|
|
+ * containing 20 response protos will be written under the prefix
|
|
|
+ * `gcs_destination`.`uri`.
|
|
|
+ * Currently, batch_size only applies to GcsDestination, with potential future
|
|
|
+ * support for other output configurations.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *batchSize;
|
|
|
+
|
|
|
+/** The Google Cloud Storage location to write the output(s) to. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1GcsDestination *gcsDestination;
|
|
|
+
|
|
|
+@end
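+
+// A minimal sketch (illustrative only, excluded from compilation) of the
+// batch-size arithmetic documented above: with batchSize = 20, a 100-page
+// PDF yields 100 / 20 = 5 sharded JSON files under the destination prefix.
+// The helper name is hypothetical.
+#if 0
+static GTLRVision_GoogleCloudVisionV1p3beta1OutputConfig *MakeOutputConfig(
+    GTLRVision_GoogleCloudVisionV1p3beta1GcsDestination *dest) {
+  GTLRVision_GoogleCloudVisionV1p3beta1OutputConfig *output =
+      [GTLRVision_GoogleCloudVisionV1p3beta1OutputConfig object];
+  output.gcsDestination = dest;
+  output.batchSize = @20;  // Valid range [1, 100]; defaults to 20 when unset.
+  return output;
+}
+#endif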
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected page from OCR.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1Page : GTLRObject
|
|
|
+
|
|
|
+/** List of blocks of text, images etc on this page. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1Block *> *blocks;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of the OCR results on the page. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Page height. For PDFs the unit is points. For images (including
|
|
|
+ * TIFFs) the unit is pixels.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *height;
|
|
|
+
|
|
|
+/** Additional information detected on the page. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationTextProperty *property;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Page width. For PDFs the unit is points. For images (including
|
|
|
+ * TIFFs) the unit is pixels.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *width;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Structural unit of text representing a number of words in a certain order.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1Paragraph : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The bounding box for the paragraph.
|
|
|
+ * The vertices are in the order of top-left, top-right, bottom-right,
|
|
|
+ * bottom-left. When a rotation of the bounding box is detected the rotation
|
|
|
+ * is represented as around the top-left corner as defined when the text is
|
|
|
+ * read in the 'natural' orientation.
|
|
|
+ * For example:
|
|
|
+ * * when the text is horizontal it might look like:
|
|
|
+ * 0----1
|
|
|
+ * | |
|
|
|
+ * 3----2
|
|
|
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
|
|
|
+ * 2----3
|
|
|
+ * | |
|
|
|
+ * 1----0
|
|
|
+ * and the vertex order will still be (0, 1, 2, 3).
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *boundingBox;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of the OCR results for the paragraph. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/** Additional information detected for the paragraph. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationTextProperty *property;
|
|
|
+
|
|
|
+/** List of words in this paragraph. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1Word *> *words;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A 3D position in the image, used primarily for Face detection landmarks.
|
|
|
+ * A valid Position must have both x and y coordinates.
|
|
|
+ * The position coordinates are in the same scale as the original image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1Position : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * X coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *x;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Y coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *y;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Z coordinate (or depth).
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *z;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A Product contains ReferenceImages.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1Product : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * User-provided metadata to be stored with this product. Must be at most 4096
|
|
|
+ * characters long.
|
|
|
+ *
|
|
|
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The user-provided name for this Product. Must not be empty. Must be at most
|
|
|
+ * 4096 characters long.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *displayName;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name of the product.
|
|
|
+ * Format is:
|
|
|
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
|
|
|
+ * This field is ignored when creating a product.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The category for the product identified by the reference image. This should
|
|
|
+ * be either "homegoods", "apparel", or "toys".
|
|
|
+ * This field is immutable.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *productCategory;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Key-value pairs that can be attached to a product. At query time,
|
|
|
+ * constraints can be specified based on the product_labels.
|
|
|
+ * Note that integer values can be provided as strings, e.g. "1199". Only
|
|
|
+ * strings with integer values can match a range-based restriction, which will
|
|
|
+ * be supported soon.
|
|
|
+ * Multiple values can be assigned to the same key. One product may have up to
|
|
|
+ * 100 product_labels.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1ProductKeyValue *> *productLabels;
|
|
|
+
|
|
|
+@end
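+
+// A minimal sketch (illustrative only, excluded from compilation) of a
+// Product with one of the documented categories and a key-value label; the
+// helper name and the label key/value strings are placeholders.
+#if 0
+static GTLRVision_GoogleCloudVisionV1p3beta1Product *MakeProduct(void) {
+  GTLRVision_GoogleCloudVisionV1p3beta1ProductKeyValue *label =
+      [GTLRVision_GoogleCloudVisionV1p3beta1ProductKeyValue object];
+  label.key = @"style";    // Non-empty, at most 128 bytes.
+  label.value = @"womens"; // Non-empty, at most 128 bytes.
+  GTLRVision_GoogleCloudVisionV1p3beta1Product *product =
+      [GTLRVision_GoogleCloudVisionV1p3beta1Product object];
+  product.displayName = @"My product";   // Must not be empty; at most 4096 characters.
+  product.productCategory = @"apparel";  // "homegoods", "apparel", or "toys"; immutable.
+  product.productLabels = @[ label ];
+  // `name` is ignored when creating a product, so it is left unset here.
+  return product;
+}
+#endif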
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A product label represented as a key-value pair.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1ProductKeyValue : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The key of the label attached to the product. Cannot be empty and cannot
|
|
|
+ * exceed 128 bytes.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *key;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The value of the label attached to the product. Cannot be empty and
|
|
|
+ * cannot exceed 128 bytes.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *value;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Results for a product search request.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResults : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Timestamp of the index which provided these results. Changes made after
|
|
|
+ * this time are not reflected in the current results.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRDateTime *indexTime;
|
|
|
+
|
|
|
+/**
|
|
|
+ * List of results grouped by products detected in the query image. Each entry
|
|
|
+ * corresponds to one bounding polygon in the query image, and contains the
|
|
|
+ * matching products specific to that region. There may be duplicate product
|
|
|
+ * matches in the union of all the per-product results.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResultsGroupedResult *> *productGroupedResults;
|
|
|
+
|
|
|
+/** List of results, one for each product match. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResultsResult *> *results;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Information about the products similar to a single product in a query
|
|
|
+ * image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResultsGroupedResult : GTLRObject
|
|
|
+
|
|
|
+/** The bounding polygon around the product detected in the query image. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *boundingPoly;
|
|
|
+
|
|
|
+/** List of results, one for each product match. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResultsResult *> *results;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Information about a product.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1ProductSearchResultsResult : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name of the image from the product that is the closest match
|
|
|
+ * to the query.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *image;
|
|
|
+
|
|
|
+/** The Product. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1Product *product;
|
|
|
+
|
|
|
+/**
|
|
|
+ * A confidence level on the match, ranging from 0 (no confidence) to
|
|
|
+ * 1 (full confidence).
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *score;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A `Property` consists of a user-supplied name/value pair.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1Property : GTLRObject
|
|
|
+
|
|
|
+/** Name of the property. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Value of numeric properties.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of unsignedLongLongValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *uint64Value;
|
|
|
+
|
|
|
+/** Value of the property. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *value;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A `ReferenceImage` represents a product image and its associated metadata,
|
|
|
+ * such as bounding boxes.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1ReferenceImage : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Bounding polygons around the areas of interest in the reference image.
|
|
|
+ * Optional. If this field is empty, the system will try to detect regions of
|
|
|
+ * interest. At most 10 bounding polygons will be used.
|
|
|
+ * The provided shape is converted into a non-rotated rectangle. Once
|
|
|
+ * converted, the small edge of the rectangle must be greater than or equal
|
|
|
+ * to 300 pixels. The aspect ratio must be 1:4 or less (i.e. 1:3 is ok; 1:5
|
|
|
+ * is not).
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *> *boundingPolys;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name of the reference image.
|
|
|
+ * Format is:
|
|
|
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
|
|
|
+ * This field is ignored when creating a reference image.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The Google Cloud Storage URI of the reference image.
|
|
|
+ * The URI must start with `gs://`.
|
|
|
+ * Required.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *uri;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Set of features pertaining to the image, computed by computer vision
|
|
|
+ * methods over safe-search verticals (for example, adult, spoof, medical,
|
|
|
+ * violence).
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Represents the adult content likelihood for the image. Adult content may
|
|
|
+ * contain elements such as nudity, pornographic images or cartoons, or
|
|
|
+ * sexual activities.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_Likely
|
|
|
+ * It is likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_Possible
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ * (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_Unknown
|
|
|
+ * Unknown likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_Unlikely
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_VeryLikely
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_VeryUnlikely
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *adult;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Likelihood that this is a medical image.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_Likely
|
|
|
+ * It is likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_Possible
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ * (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_Unknown
|
|
|
+ * Unknown likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_Unlikely
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_VeryLikely
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Medical_VeryUnlikely
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *medical;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Likelihood that the request image contains racy content. Racy content may
|
|
|
+ * include (but is not limited to) skimpy or sheer clothing, strategically
|
|
|
+ * covered nudity, lewd or provocative poses, or close-ups of sensitive
|
|
|
+ * body areas.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_Likely
|
|
|
+ * It is likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_Possible
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ * (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_Unknown
|
|
|
+ * Unknown likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_Unlikely
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_VeryLikely
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Racy_VeryUnlikely
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *racy;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Spoof likelihood. The likelihood that a modification
|
|
|
+ * was made to the image's canonical version to make it appear
|
|
|
+ * funny or offensive.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_Likely
|
|
|
+ * It is likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_Possible
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ * (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_Unknown
|
|
|
+ * Unknown likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_Unlikely
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_VeryLikely
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Spoof_VeryUnlikely
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *spoof;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Likelihood that this image contains violent content.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_Likely
|
|
|
+ * It is likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_Possible
|
|
|
+ * It is possible that the image belongs to the specified vertical.
|
|
|
+ * (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_Unknown
|
|
|
+ * Unknown likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_Unlikely
|
|
|
+ * It is unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_VeryLikely
|
|
|
+ * It is very likely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Violence_VeryUnlikely
|
|
|
+ * It is very unlikely that the image belongs to the specified vertical.
|
|
|
+ * (Value: "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *violence;
|
|
|
+
|
|
|
+@end
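+
+// A minimal sketch (illustrative only, excluded from compilation) gating an
+// image on the likelihood strings documented above. The threshold chosen
+// here (LIKELY or stronger) is an application decision, not an API rule;
+// the Adult_* and Racy_* constants wrap the same underlying string values
+// ("LIKELY", "VERY_LIKELY"), so one set of constants covers both fields.
+#if 0
+static BOOL IsLikelyOrStronger(NSString *likelihood) {
+  return [kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_Likely isEqualToString:likelihood] ||
+         [kGTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation_Adult_VeryLikely isEqualToString:likelihood];
+}
+
+static BOOL ShouldBlockImage(GTLRVision_GoogleCloudVisionV1p3beta1SafeSearchAnnotation *safeSearch) {
+  return IsLikelyOrStronger(safeSearch.adult) || IsLikelyOrStronger(safeSearch.racy);
+}
+#endif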
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A single symbol representation.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1Symbol : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The bounding box for the symbol.
|
|
|
+ * The vertices are in the order of top-left, top-right, bottom-right,
|
|
|
+ * bottom-left. When a rotation of the bounding box is detected the rotation
|
|
|
+ * is represented as around the top-left corner as defined when the text is
|
|
|
+ * read in the 'natural' orientation.
|
|
|
+ * For example:
|
|
|
+ * * when the text is horizontal it might look like:
|
|
|
+ * 0----1
|
|
|
+ * | |
|
|
|
+ * 3----2
|
|
|
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
|
|
|
+ * 2----3
|
|
|
+ * | |
|
|
|
+ * 1----0
|
|
|
+ * and the vertex order will still be (0, 1, 2, 3).
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *boundingBox;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of the OCR results for the symbol. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/** Additional information detected for the symbol. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationTextProperty *property;
|
|
|
+
|
|
|
+/** The actual UTF-8 representation of the symbol. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *text;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * TextAnnotation contains a structured representation of OCR extracted text.
|
|
|
+ * The hierarchy of an OCR extracted text structure is like this:
|
|
|
+ * TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
|
|
|
+ * Each structural component, starting from Page, may further have its own
|
|
|
+ * properties. Properties describe detected languages, breaks, etc. Please
|
|
|
+ * refer
|
|
|
+ * to the TextAnnotation.TextProperty message definition below for more
|
|
|
+ * detail.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotation : GTLRObject
|
|
|
+
|
|
|
+/** List of pages detected by OCR. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1Page *> *pages;
|
|
|
+
|
|
|
+/** UTF-8 text detected on the pages. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *text;
|
|
|
+
|
|
|
+@end
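+
+// A minimal sketch (illustrative only, excluded from compilation) walking
+// the Page -> Block -> Paragraph -> Word -> Symbol hierarchy described
+// above; Block's `paragraphs` property is assumed from its declaration
+// elsewhere in this header, and the helper name is hypothetical.
+#if 0
+static NSString *TextOfAnnotation(GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotation *annotation) {
+  NSMutableString *text = [NSMutableString string];
+  for (GTLRVision_GoogleCloudVisionV1p3beta1Page *page in annotation.pages) {
+    for (GTLRVision_GoogleCloudVisionV1p3beta1Block *block in page.blocks) {
+      for (GTLRVision_GoogleCloudVisionV1p3beta1Paragraph *paragraph in block.paragraphs) {
+        for (GTLRVision_GoogleCloudVisionV1p3beta1Word *word in paragraph.words) {
+          for (GTLRVision_GoogleCloudVisionV1p3beta1Symbol *symbol in word.symbols) {
+            [text appendString:symbol.text ?: @""];
+          }
+        }
+      }
+    }
+  }
+  return text;  // For the flattened text, annotation.text is usually simpler.
+}
+#endif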
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected start or end of a structural component.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * True if the break precedes the element.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of boolValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *isPrefix;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected break type.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_EolSureSpace
|
|
|
+ * Line-wrapping break. (Value: "EOL_SURE_SPACE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_Hyphen
|
|
|
+ * End-line hyphen that is not present in text; does not co-occur with
|
|
|
+ * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`. (Value: "HYPHEN")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_LineBreak
|
|
|
+ * Line break that ends a paragraph. (Value: "LINE_BREAK")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_Space
|
|
|
+ * Regular space. (Value: "SPACE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_SureSpace
|
|
|
+ * Sure space (very wide). (Value: "SURE_SPACE")
|
|
|
+ * @arg @c kGTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak_Type_Unknown
|
|
|
+ * Unknown break label type. (Value: "UNKNOWN")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *type;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected language for a structural component.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedLanguage : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of detected language. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
|
|
|
+ * information, see
|
|
|
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *languageCode;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Additional information detected on the structural component.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationTextProperty : GTLRObject
|
|
|
+
|
|
|
+/** Detected start or end of a text segment. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedBreak *detectedBreak;
|
|
|
+
|
|
|
+/** A list of detected languages together with confidence. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationDetectedLanguage *> *detectedLanguages;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A vertex represents a 2D point in the image.
|
|
|
+ * NOTE: the vertex coordinates are in the same scale as the original image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1Vertex : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * X coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *x;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Y coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *y;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Relevant information for the image from the Internet.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1WebDetection : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The service's best guess as to the topic of the request image.
|
|
|
+ * Inferred from similar images on the open web.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebLabel *> *bestGuessLabels;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Fully matching images from the Internet.
|
|
|
+ * Can include resized copies of the query image.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebImage *> *fullMatchingImages;
|
|
|
+
|
|
|
+/** Web pages containing the matching images from the Internet. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebPage *> *pagesWithMatchingImages;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Partial matching images from the Internet.
|
|
|
+ * Those images are similar enough to share some key-point features. For
|
|
|
+ * example, an original image will likely have partial matching for its crops.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebImage *> *partialMatchingImages;
|
|
|
+
|
|
|
+/** The visually similar image results. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebImage *> *visuallySimilarImages;
|
|
|
+
|
|
|
+/** Deduced entities from similar images on the Internet. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebEntity *> *webEntities;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Entity deduced from similar images on the Internet.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebEntity : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Canonical description of the entity, in English.
|
|
|
+ *
|
|
|
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
|
|
|
+
|
|
|
+/** Opaque entity ID. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *entityId;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Overall relevancy score for the entity.
|
|
|
+ * Not normalized and not comparable across different image queries.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *score;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Metadata for online images.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebImage : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * (Deprecated) Overall relevancy score for the image.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *score;
|
|
|
+
|
|
|
+/** The result image URL. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *url;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Label to provide extra metadata for the web detection.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebLabel : GTLRObject
|
|
|
+
|
|
|
+/** Label for extra metadata. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *label;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
|
|
|
+ * For more information, see
|
|
|
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *languageCode;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Metadata for web pages.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebPage : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Fully matching images on the page.
|
|
|
+ * Can include resized copies of the query image.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebImage *> *fullMatchingImages;
|
|
|
+
|
|
|
+/** Title for the web page; may contain HTML markup. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *pageTitle;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Partial matching images on the page.
|
|
|
+ * Those images are similar enough to share some key-point features. For
|
|
|
+ * example, an original image will likely have partial matching for its
|
|
|
+ * crops.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1WebDetectionWebImage *> *partialMatchingImages;
|
|
|
+
|
|
|
+/**
|
|
|
+ * (Deprecated) Overall relevancy score for the web page.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *score;
|
|
|
+
|
|
|
+/** The result web page URL. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *url;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A word representation.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GoogleCloudVisionV1p3beta1Word : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The bounding box for the word.
|
|
|
+ * The vertices are in the order of top-left, top-right, bottom-right,
|
|
|
+ * bottom-left. When a rotation of the bounding box is detected the rotation
|
|
|
+ * is represented as around the top-left corner as defined when the text is
|
|
|
+ * read in the 'natural' orientation.
|
|
|
+ * For example:
|
|
|
+ * * when the text is horizontal it might look like:
|
|
|
+ * 0----1
|
|
|
+ * | |
|
|
|
+ * 3----2
|
|
|
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
|
|
|
+ * 2----3
|
|
|
+ * | |
|
|
|
+ * 1----0
|
|
|
+ * and the vertex order will still be (0, 1, 2, 3).
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1BoundingPoly *boundingBox;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of the OCR results for the word. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/** Additional information detected for the word. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GoogleCloudVisionV1p3beta1TextAnnotationTextProperty *property;
|
|
|
+
|
|
|
+/**
|
|
|
+ * List of symbols in the word.
|
|
|
+ * The order of the symbols follows the natural reading order.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GoogleCloudVisionV1p3beta1Symbol *> *symbols;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Information about the products similar to a single product in a query
|
|
|
+ * image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_GroupedResult : GTLRObject
|
|
|
+
|
|
|
+/** The bounding polygon around the product detected in the query image. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingPoly;
|
|
|
+
|
|
|
+/** List of results, one for each product match. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Result *> *results;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Client image to perform Google Cloud Vision API tasks over.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Image : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Image content, represented as a stream of bytes.
|
|
|
+ * Note: As with all `bytes` fields, protobuffers use a pure binary
|
|
|
+ * representation, whereas JSON representations use base64.
|
|
|
+ *
|
|
|
+ * Contains encoded binary data; GTLRBase64 can encode/decode (probably
|
|
|
+ * web-safe format).
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *content;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Google Cloud Storage image location, or publicly-accessible image
|
|
|
+ * URL. If both `content` and `source` are provided for an image, `content`
|
|
|
+ * takes precedence and is used to perform the image annotation request.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_ImageSource *source;
|
|
|
+
|
|
|
+@end
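+
+// A minimal sketch (illustrative only, excluded from compilation) of
+// populating `content` from raw bytes. GTLREncodeWebSafeBase64() is assumed
+// from the GTLR runtime's GTLRBase64.h, matching the "probably web-safe
+// format" note above; the helper name is hypothetical.
+#if 0
+static GTLRVision_Image *MakeImageFromData(NSData *imageData) {
+  GTLRVision_Image *image = [GTLRVision_Image object];
+  image.content = GTLREncodeWebSafeBase64(imageData);
+  // If `source` were also set, `content` would take precedence.
+  return image;
+}
+#endif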
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * If an image was produced from a file (e.g. a PDF), this message gives
|
|
|
+ * information about the source of that image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ImageAnnotationContext : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * If the file was a PDF or TIFF, this field gives the page number within
|
|
|
+ * the file used to produce the image.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *pageNumber;
|
|
|
+
|
|
|
+/** The URI of the file used to produce the image. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *uri;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Image context and/or feature-specific parameters.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ImageContext : GTLRObject
|
|
|
+
|
|
|
+/** Parameters for crop hints annotation request. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_CropHintsParams *cropHintsParams;
|
|
|
+
|
|
|
+/**
|
|
|
+ * List of languages to use for TEXT_DETECTION. In most cases, an empty value
|
|
|
+ * yields the best results since it enables automatic language detection. For
|
|
|
+ * languages based on the Latin alphabet, setting `language_hints` is not
|
|
|
+ * needed. In rare cases, when the language of the text in the image is known,
|
|
|
+ * setting a hint will help get better results (although it will be a
|
|
|
+ * significant hindrance if the hint is wrong). Text detection returns an
|
|
|
+ * error if one or more of the specified languages is not one of the
|
|
|
+ * [supported languages](/vision/docs/languages).
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<NSString *> *languageHints;
|
|
|
+
|
|
|
+/** Not used. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_LatLongRect *latLongRect;
|
|
|
+
|
|
|
+/** Parameters for product search. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_ProductSearchParams *productSearchParams;
|
|
|
+
|
|
|
+/** Parameters for web detection. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_WebDetectionParams *webDetectionParams;
|
|
|
+
|
|
|
+@end
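+
+// Minimal sketch (assumed usage, not generated): supplying a language hint
+// for TEXT_DETECTION. Per the note above, only worth setting when the text
+// language is actually known.
+//
+//   GTLRVision_ImageContext *context = [GTLRVision_ImageContext object];
+//   context.languageHints = @[ @"ja" ];  // a wrong hint hurts results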
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Stores image properties, such as dominant colors.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ImageProperties : GTLRObject
|
|
|
+
|
|
|
+/** If present, dominant colors completed successfully. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_DominantColorsAnnotation *dominantColors;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * External image source (Google Cloud Storage or web URL image location).
|
|
|
+ */
|
|
|
+@interface GTLRVision_ImageSource : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * **Use `image_uri` instead.**
|
|
|
+ * The Google Cloud Storage URI of the form
|
|
|
+ * `gs://bucket_name/object_name`. Object versioning is not supported. See
|
|
|
+ * [Google Cloud Storage Request
|
|
|
+ * URIs](https://cloud.google.com/storage/docs/reference-uris) for more info.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *gcsImageUri;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The URI of the source image. Can be either:
|
|
|
+ * 1. A Google Cloud Storage URI of the form
|
|
|
+ * `gs://bucket_name/object_name`. Object versioning is not supported. See
|
|
|
+ * [Google Cloud Storage Request
|
|
|
+ * URIs](https://cloud.google.com/storage/docs/reference-uris) for more
|
|
|
+ * info.
|
|
|
+ * 2. A publicly-accessible image HTTP/HTTPS URL. When fetching images from
|
|
|
+ * HTTP/HTTPS URLs, Google cannot guarantee that the request will be
|
|
|
+ * completed. Your request may fail if the specified host denies the
|
|
|
+ * request (e.g. due to request throttling or DOS prevention), or if Google
|
|
|
+ * throttles requests to the site for abuse prevention. You should not
|
|
|
+ * depend on externally-hosted images for production applications.
|
|
|
+ * When both `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
|
|
|
+ * precedence.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *imageUri;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The Google Cloud Storage location for a csv file which preserves a list of
|
|
|
+ * ImportProductSetRequests in each line.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ImportProductSetsGcsSource : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The Google Cloud Storage URI of the input csv file.
|
|
|
+ * The URI must start with `gs://`.
|
|
|
+ * The format of the input csv file should be one image per line.
|
|
|
+ * In each line, there are 8 columns.
|
|
|
+ * 1. image-uri
|
|
|
+ * 2. image-id
|
|
|
+ * 3. product-set-id
|
|
|
+ * 4. product-id
|
|
|
+ * 5. product-category
|
|
|
+ * 6. product-display-name
|
|
|
+ * 7. labels
|
|
|
+ * 8. bounding-poly
|
|
|
+ * The `image-uri`, `product-set-id`, `product-id`, and `product-category`
|
|
|
+ * columns are required. All other columns are optional.
|
|
|
+ * If the `ProductSet` or `Product` specified by the `product-set-id` and
|
|
|
+ * `product-id` values does not exist, then the system will create a new
|
|
|
+ * `ProductSet` or `Product` for the image. In this case, the
|
|
|
+ * `product-display-name` column refers to
|
|
|
+ * display_name, the
|
|
|
+ * `product-category` column refers to
|
|
|
+ * product_category, and the
|
|
|
+ * `labels` column refers to product_labels.
|
|
|
+ * The `image-id` column is optional but must be unique if provided. If it is
|
|
|
+ * empty, the system will automatically assign a unique id to the image.
|
|
|
+ * The `product-display-name` column is optional. If it is empty, the system
|
|
|
+ * sets the display_name field for the product to a
|
|
|
+ * space (" "). You can update the `display_name` later by using the API.
|
|
|
+ * If a `Product` with the specified `product-id` already exists, then the
|
|
|
+ * system ignores the `product-display-name`, `product-category`, and `labels`
|
|
|
+ * columns.
|
|
|
+ * The `labels` column (optional) is a line containing a list of
|
|
|
+ * comma-separated key-value pairs, in the following format:
|
|
|
+ * "key_1=value_1,key_2=value_2,...,key_n=value_n"
|
|
|
+ * The `bounding-poly` column (optional) identifies one region of
|
|
|
+ * interest from the image in the same manner as `CreateReferenceImage`. If
|
|
|
+ * you do not specify the `bounding-poly` column, then the system will try to
|
|
|
+ * detect regions of interest automatically.
|
|
|
+ * At most one `bounding-poly` column is allowed per line. If the image
|
|
|
+ * contains multiple regions of interest, add a line to the CSV file that
|
|
|
+ * includes the same product information, and the `bounding-poly` values for
|
|
|
+ * each region of interest.
|
|
|
+ * The `bounding-poly` column must contain an even number of comma-separated
|
|
|
+ * numbers, in the format "p1_x,p1_y,p2_x,p2_y,...,pn_x,pn_y". Use
|
|
|
+ * non-negative integers for absolute bounding polygons, and float values
|
|
|
+ * in [0, 1] for normalized bounding polygons.
|
|
|
+ * The system will resize the image if the image resolution is too
|
|
|
+ * large to process (larger than 20MP).
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *csvFileUri;
|
|
|
+
|
|
|
+@end
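+
+// A hypothetical CSV line illustrating the 8 columns described above. The
+// `labels` and `bounding-poly` columns are quoted because they contain
+// commas; this bounding-poly uses normalized float vertices in [0, 1]:
+//
+//   gs://b/dress.jpg,img-0,set-0,prod-0,apparel,Blue Dress,"color=blue,style=formal","0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9"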
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The input content for the `ImportProductSets` method.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ImportProductSetsInputConfig : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The Google Cloud Storage location for a csv file which preserves a list
|
|
|
+ * of ImportProductSetRequests in each line.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_ImportProductSetsGcsSource *gcsSource;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Request message for the `ImportProductSets` method.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ImportProductSetsRequest : GTLRObject
|
|
|
+
|
|
|
+/** The input content for the list of requests. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_ImportProductSetsInputConfig *inputConfig;
|
|
|
+
|
|
|
+@end
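+
+// Minimal sketch (assumed wiring, hypothetical URI): building an
+// ImportProductSets request from a CSV file in Cloud Storage.
+//
+//   GTLRVision_ImportProductSetsGcsSource *gcs =
+//       [GTLRVision_ImportProductSetsGcsSource object];
+//   gcs.csvFileUri = @"gs://example-bucket/product_sets.csv";
+//   GTLRVision_ImportProductSetsInputConfig *input =
+//       [GTLRVision_ImportProductSetsInputConfig object];
+//   input.gcsSource = gcs;
+//   GTLRVision_ImportProductSetsRequest *request =
+//       [GTLRVision_ImportProductSetsRequest object];
+//   request.inputConfig = input;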
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response message for the `ImportProductSets` method.
|
|
|
+ * This message is returned by the
|
|
|
+ * google.longrunning.Operations.GetOperation method in the returned
|
|
|
+ * google.longrunning.Operation.response field.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ImportProductSetsResponse : GTLRObject
|
|
|
+
|
|
|
+/** The list of reference_images that are imported successfully. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_ReferenceImage *> *referenceImages;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The rpc status for each ImportProductSet request, including both successes
|
|
|
+ * and errors.
|
|
|
+ * The number of statuses here matches the number of lines in the csv file,
|
|
|
+ * and statuses[i] stores the success or failure status of processing the i-th
|
|
|
+ * line of the csv, starting from line 0.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Status *> *statuses;
|
|
|
+
|
|
|
+@end
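+
+// Sketch of the statuses[i] <-> CSV-line-i correspondence noted above
+// (`response` is a hypothetical GTLRVision_ImportProductSetsResponse):
+//
+//   [response.statuses enumerateObjectsUsingBlock:
+//       ^(GTLRVision_Status *status, NSUInteger idx, BOOL *stop) {
+//     if (status.code.intValue != 0) {
+//       NSLog(@"CSV line %lu failed: %@", (unsigned long)idx, status.message);
+//     }
+//   }];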
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The desired input location and metadata.
|
|
|
+ */
|
|
|
+@interface GTLRVision_InputConfig : GTLRObject
|
|
|
+
|
|
|
+/** The Google Cloud Storage location to read the input from. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GcsSource *gcsSource;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The type of the file. Currently only "application/pdf" and "image/tiff"
|
|
|
+ * are supported. Wildcards are not supported.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *mimeType;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A product label represented as a key-value pair.
|
|
|
+ */
|
|
|
+@interface GTLRVision_KeyValue : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The key of the label attached to the product. Cannot be empty and cannot
|
|
|
+ * exceed 128 bytes.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *key;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The value of the label attached to the product. Cannot be empty and
|
|
|
+ * cannot exceed 128 bytes.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *value;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A face-specific landmark (for example, a face feature).
|
|
|
+ */
|
|
|
+@interface GTLRVision_Landmark : GTLRObject
|
|
|
+
|
|
|
+/** Face landmark position. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_Position *position;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Face landmark type.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_ChinGnathion Chin gnathion. (Value:
|
|
|
+ * "CHIN_GNATHION")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_ChinLeftGonion Chin left gonion. (Value:
|
|
|
+ * "CHIN_LEFT_GONION")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_ChinRightGonion Chin right gonion.
|
|
|
+ * (Value: "CHIN_RIGHT_GONION")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_ForeheadGlabella Forehead glabella.
|
|
|
+ * (Value: "FOREHEAD_GLABELLA")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftEarTragion Left ear tragion. (Value:
|
|
|
+ * "LEFT_EAR_TRAGION")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftEye Left eye. (Value: "LEFT_EYE")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftEyeBottomBoundary Left eye, bottom
|
|
|
+ * boundary. (Value: "LEFT_EYE_BOTTOM_BOUNDARY")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftEyebrowUpperMidpoint Left eyebrow,
|
|
|
+ * upper midpoint. (Value: "LEFT_EYEBROW_UPPER_MIDPOINT")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftEyeLeftCorner Left eye, left corner.
|
|
|
+ * (Value: "LEFT_EYE_LEFT_CORNER")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftEyePupil Left eye pupil. (Value:
|
|
|
+ * "LEFT_EYE_PUPIL")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftEyeRightCorner Left eye, right
|
|
|
+ * corner. (Value: "LEFT_EYE_RIGHT_CORNER")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftEyeTopBoundary Left eye, top
|
|
|
+ * boundary. (Value: "LEFT_EYE_TOP_BOUNDARY")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftOfLeftEyebrow Left of left eyebrow.
|
|
|
+ * (Value: "LEFT_OF_LEFT_EYEBROW")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LeftOfRightEyebrow Left of right
|
|
|
+ * eyebrow. (Value: "LEFT_OF_RIGHT_EYEBROW")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_LowerLip Lower lip. (Value: "LOWER_LIP")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_MidpointBetweenEyes Midpoint between
|
|
|
+ * eyes. (Value: "MIDPOINT_BETWEEN_EYES")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_MouthCenter Mouth center. (Value:
|
|
|
+ * "MOUTH_CENTER")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_MouthLeft Mouth left. (Value:
|
|
|
+ * "MOUTH_LEFT")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_MouthRight Mouth right. (Value:
|
|
|
+ * "MOUTH_RIGHT")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_NoseBottomCenter Nose, bottom center.
|
|
|
+ * (Value: "NOSE_BOTTOM_CENTER")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_NoseBottomLeft Nose, bottom left.
|
|
|
+ * (Value: "NOSE_BOTTOM_LEFT")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_NoseBottomRight Nose, bottom right.
|
|
|
+ * (Value: "NOSE_BOTTOM_RIGHT")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_NoseTip Nose tip. (Value: "NOSE_TIP")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightEarTragion Right ear tragion.
|
|
|
+ * (Value: "RIGHT_EAR_TRAGION")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightEye Right eye. (Value: "RIGHT_EYE")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightEyeBottomBoundary Right eye, bottom
|
|
|
+ * boundary. (Value: "RIGHT_EYE_BOTTOM_BOUNDARY")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightEyebrowUpperMidpoint Right eyebrow,
|
|
|
+ * upper midpoint. (Value: "RIGHT_EYEBROW_UPPER_MIDPOINT")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightEyeLeftCorner Right eye, left
|
|
|
+ * corner. (Value: "RIGHT_EYE_LEFT_CORNER")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightEyePupil Right eye pupil. (Value:
|
|
|
+ * "RIGHT_EYE_PUPIL")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightEyeRightCorner Right eye, right
|
|
|
+ * corner. (Value: "RIGHT_EYE_RIGHT_CORNER")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightEyeTopBoundary Right eye, top
|
|
|
+ * boundary. (Value: "RIGHT_EYE_TOP_BOUNDARY")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightOfLeftEyebrow Right of left
|
|
|
+ * eyebrow. (Value: "RIGHT_OF_LEFT_EYEBROW")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_RightOfRightEyebrow Right of right
|
|
|
+ * eyebrow. (Value: "RIGHT_OF_RIGHT_EYEBROW")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_UnknownLandmark Unknown face landmark
|
|
|
+ * detected. Should not be filled. (Value: "UNKNOWN_LANDMARK")
|
|
|
+ * @arg @c kGTLRVision_Landmark_Type_UpperLip Upper lip. (Value: "UPPER_LIP")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *type;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * An object representing a latitude/longitude pair. This is expressed as a
|
|
|
+ * pair
|
|
|
+ * of doubles representing degrees latitude and degrees longitude. Unless
|
|
|
+ * specified otherwise, this must conform to the
|
|
|
+ * <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
|
|
|
+ * standard</a>. Values must be within normalized ranges.
|
|
|
+ */
|
|
|
+@interface GTLRVision_LatLng : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The latitude in degrees. It must be in the range [-90.0, +90.0].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of doubleValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *latitude;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The longitude in degrees. It must be in the range [-180.0, +180.0].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of doubleValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *longitude;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Rectangle determined by min and max `LatLng` pairs.
|
|
|
+ */
|
|
|
+@interface GTLRVision_LatLongRect : GTLRObject
|
|
|
+
|
|
|
+/** Max lat/long pair. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_LatLng *maxLatLng;
|
|
|
+
|
|
|
+/** Min lat/long pair. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_LatLng *minLatLng;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The response message for Operations.ListOperations.
|
|
|
+ *
|
|
|
+ * @note This class supports NSFastEnumeration and indexed subscripting over
|
|
|
+ * its "operations" property. If returned as the result of a query, it
|
|
|
+ * should support automatic pagination (when @c shouldFetchNextPages is
|
|
|
+ * enabled).
|
|
|
+ */
|
|
|
+@interface GTLRVision_ListOperationsResponse : GTLRCollectionObject
|
|
|
+
|
|
|
+/** The standard List next-page token. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *nextPageToken;
|
|
|
+
|
|
|
+/**
|
|
|
+ * A list of operations that matches the specified filter in the request.
|
|
|
+ *
|
|
|
+ * @note This property is used to support NSFastEnumeration and indexed
|
|
|
+ * subscripting on this class.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Operation *> *operations;
|
|
|
+
|
|
|
+@end
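+
+// Because this is a GTLRCollectionObject over "operations" (see the note
+// above), the response can be enumerated and subscripted directly. A sketch;
+// `listResponse` is hypothetical:
+//
+//   for (GTLRVision_Operation *op in listResponse) {
+//     NSLog(@"%@ done=%@", op.name, op.done);
+//   }
+//   GTLRVision_Operation *first = listResponse[0];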
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response message for the `ListProductSets` method.
|
|
|
+ *
|
|
|
+ * @note This class supports NSFastEnumeration and indexed subscripting over
|
|
|
+ * its "productSets" property. If returned as the result of a query, it
|
|
|
+ * should support automatic pagination (when @c shouldFetchNextPages is
|
|
|
+ * enabled).
|
|
|
+ */
|
|
|
+@interface GTLRVision_ListProductSetsResponse : GTLRCollectionObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Token to retrieve the next page of results, or empty if there are no more
|
|
|
+ * results in the list.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *nextPageToken;
|
|
|
+
|
|
|
+/**
|
|
|
+ * List of ProductSets.
|
|
|
+ *
|
|
|
+ * @note This property is used to support NSFastEnumeration and indexed
|
|
|
+ * subscripting on this class.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_ProductSet *> *productSets;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response message for the `ListProductsInProductSet` method.
|
|
|
+ *
|
|
|
+ * @note This class supports NSFastEnumeration and indexed subscripting over
|
|
|
+ * its "products" property. If returned as the result of a query, it
|
|
|
+ * should support automatic pagination (when @c shouldFetchNextPages is
|
|
|
+ * enabled).
|
|
|
+ */
|
|
|
+@interface GTLRVision_ListProductsInProductSetResponse : GTLRCollectionObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Token to retrieve the next page of results, or empty if there are no more
|
|
|
+ * results in the list.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *nextPageToken;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The list of Products.
|
|
|
+ *
|
|
|
+ * @note This property is used to support NSFastEnumeration and indexed
|
|
|
+ * subscripting on this class.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Product *> *products;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response message for the `ListProducts` method.
|
|
|
+ *
|
|
|
+ * @note This class supports NSFastEnumeration and indexed subscripting over
|
|
|
+ * its "products" property. If returned as the result of a query, it
|
|
|
+ * should support automatic pagination (when @c shouldFetchNextPages is
|
|
|
+ * enabled).
|
|
|
+ */
|
|
|
+@interface GTLRVision_ListProductsResponse : GTLRCollectionObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Token to retrieve the next page of results, or empty if there are no more
|
|
|
+ * results in the list.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *nextPageToken;
|
|
|
+
|
|
|
+/**
|
|
|
+ * List of products.
|
|
|
+ *
|
|
|
+ * @note This property is used to support NSFastEnumeration and indexed
|
|
|
+ * subscripting on this class.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Product *> *products;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Response message for the `ListReferenceImages` method.
|
|
|
+ *
|
|
|
+ * @note This class supports NSFastEnumeration and indexed subscripting over
|
|
|
+ * its "referenceImages" property. If returned as the result of a query,
|
|
|
+ * it should support automatic pagination (when @c shouldFetchNextPages
|
|
|
+ * is enabled).
|
|
|
+ */
|
|
|
+@interface GTLRVision_ListReferenceImagesResponse : GTLRCollectionObject
|
|
|
+
|
|
|
+/** The next_page_token returned from a previous List request, if any. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *nextPageToken;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The maximum number of items to return. Default 10, maximum 100.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *pageSize;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The list of reference images.
|
|
|
+ *
|
|
|
+ * @note This property is used to support NSFastEnumeration and indexed
|
|
|
+ * subscripting on this class.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_ReferenceImage *> *referenceImages;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Set of detected objects with bounding boxes.
|
|
|
+ */
|
|
|
+@interface GTLRVision_LocalizedObjectAnnotation : GTLRObject
|
|
|
+
|
|
|
+/** Image region to which this object belongs. This must be populated. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingPoly;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
|
|
|
+ * information, see
|
|
|
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *languageCode;
|
|
|
+
|
|
|
+/** Object ID that should align with EntityAnnotation mid. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *mid;
|
|
|
+
|
|
|
+/** Object name, expressed in its `language_code` language. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Score of the result. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *score;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected entity location information.
|
|
|
+ */
|
|
|
+@interface GTLRVision_LocationInfo : GTLRObject
|
|
|
+
|
|
|
+/** lat/long location coordinates. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_LatLng *latLng;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A vertex represents a 2D point in the image.
|
|
|
+ * NOTE: the normalized vertex coordinates are relative to the original image
|
|
|
+ * and range from 0 to 1.
|
|
|
+ */
|
|
|
+@interface GTLRVision_NormalizedVertex : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * X coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *x;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Y coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *y;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * This resource represents a long-running operation that is the result of a
|
|
|
+ * network API call.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Operation : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * If the value is `false`, it means the operation is still in progress.
|
|
|
+ * If `true`, the operation is completed, and either `error` or `response` is
|
|
|
+ * available.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of boolValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *done;
|
|
|
+
|
|
|
+/** The error result of the operation in case of failure or cancellation. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_Status *error;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Service-specific metadata associated with the operation. It typically
|
|
|
+ * contains progress information and common metadata such as create time.
|
|
|
+ * Some services might not provide such metadata. Any method that returns a
|
|
|
+ * long-running operation should document the metadata type, if any.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_Operation_Metadata *metadata;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The server-assigned name, which is only unique within the same service that
|
|
|
+ * originally returns it. If you use the default HTTP mapping, the
|
|
|
+ * `name` should have the format of `operations/some/unique/name`.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The normal response of the operation in case of success. If the original
|
|
|
+ * method returns no data on success, such as `Delete`, the response is
|
|
|
+ * `google.protobuf.Empty`. If the original method is standard
|
|
|
+ * `Get`/`Create`/`Update`, the response should be the resource. For other
|
|
|
+ * methods, the response should have the type `XxxResponse`, where `Xxx`
|
|
|
+ * is the original method name. For example, if the original method name
|
|
|
+ * is `TakeSnapshot()`, the inferred response type is
|
|
|
+ * `TakeSnapshotResponse`.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_Operation_Response *response;
|
|
|
+
|
|
|
+@end
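+
+// Sketch of the long-running-operation contract described above: once `done`
+// is true, one of `error` or `response` is populated (`operation` is a
+// hypothetical GTLRVision_Operation):
+//
+//   if (operation.done.boolValue) {
+//     if (operation.error != nil) {
+//       NSLog(@"Failed (%@): %@", operation.error.code, operation.error.message);
+//     } else {
+//       GTLRVision_Operation_Response *result = operation.response;
+//       // Unpack the method-specific payload via -additionalProperties.
+//     }
+//   }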
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Service-specific metadata associated with the operation. It typically
|
|
|
+ * contains progress information and common metadata such as create time.
|
|
|
+ * Some services might not provide such metadata. Any method that returns a
|
|
|
+ * long-running operation should document the metadata type, if any.
|
|
|
+ *
|
|
|
+ * @note This class is documented as having more properties of any valid JSON
|
|
|
+ * type. Use @c -additionalJSONKeys and @c -additionalPropertyForName: to
|
|
|
+ * get the list of properties and then fetch them; or @c
|
|
|
+ * -additionalProperties to fetch them all at once.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Operation_Metadata : GTLRObject
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The normal response of the operation in case of success. If the original
|
|
|
+ * method returns no data on success, such as `Delete`, the response is
|
|
|
+ * `google.protobuf.Empty`. If the original method is standard
|
|
|
+ * `Get`/`Create`/`Update`, the response should be the resource. For other
|
|
|
+ * methods, the response should have the type `XxxResponse`, where `Xxx`
|
|
|
+ * is the original method name. For example, if the original method name
|
|
|
+ * is `TakeSnapshot()`, the inferred response type is
|
|
|
+ * `TakeSnapshotResponse`.
|
|
|
+ *
|
|
|
+ * @note This class is documented as having more properties of any valid JSON
|
|
|
+ * type. Use @c -additionalJSONKeys and @c -additionalPropertyForName: to
|
|
|
+ * get the list of properties and then fetch them; or @c
|
|
|
+ * -additionalProperties to fetch them all at once.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Operation_Response : GTLRObject
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Contains metadata for the BatchAnnotateImages operation.
|
|
|
+ */
|
|
|
+@interface GTLRVision_OperationMetadata : GTLRObject
|
|
|
+
|
|
|
+/** The time when the batch request was received. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRDateTime *createTime;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Current state of the batch operation.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_OperationMetadata_State_Cancelled The batch processing
|
|
|
+ * was cancelled. (Value: "CANCELLED")
|
|
|
+ * @arg @c kGTLRVision_OperationMetadata_State_Created Request is received.
|
|
|
+ * (Value: "CREATED")
|
|
|
+ * @arg @c kGTLRVision_OperationMetadata_State_Done The batch processing is
|
|
|
+ * done. (Value: "DONE")
|
|
|
+ * @arg @c kGTLRVision_OperationMetadata_State_Running Request is actively
|
|
|
+ * being processed. (Value: "RUNNING")
|
|
|
+ * @arg @c kGTLRVision_OperationMetadata_State_StateUnspecified Invalid.
|
|
|
+ * (Value: "STATE_UNSPECIFIED")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *state;
|
|
|
+
|
|
|
+/** The time when the operation result was last updated. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRDateTime *updateTime;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The desired output location and metadata.
|
|
|
+ */
|
|
|
+@interface GTLRVision_OutputConfig : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The max number of response protos to put into each output JSON file on
|
|
|
+ * Google Cloud Storage.
|
|
|
+ * The valid range is [1, 100]. If not specified, the default value is 20.
|
|
|
+ * For example, for one pdf file with 100 pages, 100 response protos will
|
|
|
+ * be generated. If `batch_size` = 20, then 5 json files each
|
|
|
+ * containing 20 response protos will be written under the prefix
|
|
|
+ * `gcs_destination`.`uri`.
|
|
|
+ * Currently, batch_size only applies to GcsDestination, with potential future
|
|
|
+ * support for other output configurations.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *batchSize;
|
|
|
+
|
|
|
+/** The Google Cloud Storage location to write the output(s) to. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_GcsDestination *gcsDestination;
|
|
|
+
|
|
|
+@end
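+
+// Worked sketch of the `batchSize` arithmetic above: a 100-page PDF with
+// batchSize = 20 yields ceil(100 / 20) = 5 output JSON files under the
+// destination prefix (the URI is hypothetical):
+//
+//   GTLRVision_GcsDestination *destination = [GTLRVision_GcsDestination object];
+//   destination.uri = @"gs://example-bucket/ocr-output/";
+//   GTLRVision_OutputConfig *output = [GTLRVision_OutputConfig object];
+//   output.gcsDestination = destination;
+//   output.batchSize = @20;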
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Detected page from OCR.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Page : GTLRObject
|
|
|
+
|
|
|
+/** List of blocks of text, images etc on this page. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Block *> *blocks;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of the OCR results on the page. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Page height. For PDFs the unit is points. For images (including
|
|
|
+ * TIFFs) the unit is pixels.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *height;
|
|
|
+
|
|
|
+/** Additional information detected on the page. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_TextProperty *property;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Page width. For PDFs the unit is points. For images (including
|
|
|
+ * TIFFs) the unit is pixels.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *width;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Structural unit of text representing a number of words in certain order.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Paragraph : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The bounding box for the paragraph.
|
|
|
+ * The vertices are in the order of top-left, top-right, bottom-right,
|
|
|
+ * bottom-left. When a rotation of the bounding box is detected the rotation
|
|
|
+ * is represented as around the top-left corner as defined when the text is
|
|
|
+ * read in the 'natural' orientation.
|
|
|
+ * For example:
|
|
|
+ * * when the text is horizontal it might look like:
|
|
|
+ * 0----1
|
|
|
+ * | |
|
|
|
+ * 3----2
|
|
|
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
|
|
|
+ * 2----3
|
|
|
+ * | |
|
|
|
+ * 1----0
|
|
|
+ * and the vertex order will still be (0, 1, 2, 3).
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingBox;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of the OCR results for the paragraph. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/** Additional information detected for the paragraph. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_TextProperty *property;
|
|
|
+
|
|
|
+/** List of words in this paragraph. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Word *> *words;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A 3D position in the image, used primarily for Face detection landmarks.
|
|
|
+ * A valid Position must have both x and y coordinates.
|
|
|
+ * The position coordinates are in the same scale as the original image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Position : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * X coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *x;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Y coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *y;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Z coordinate (or depth).
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *z;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A Product contains ReferenceImages.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Product : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * User-provided metadata to be stored with this product. Must be at most 4096
|
|
|
+ * characters long.
|
|
|
+ *
|
|
|
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The user-provided name for this Product. Must not be empty. Must be at most
|
|
|
+ * 4096 characters long.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *displayName;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name of the product.
|
|
|
+ * Format is:
|
|
|
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`.
|
|
|
+ * This field is ignored when creating a product.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The category for the product identified by the reference image. This should
|
|
|
+ * be either "homegoods", "apparel", or "toys".
|
|
|
+ * This field is immutable.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *productCategory;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Key-value pairs that can be attached to a product. At query time,
|
|
|
+ * constraints can be specified based on the product_labels.
|
|
|
+ * Note that integer values can be provided as strings, e.g. "1199". Only
|
|
|
+ * strings with integer values can match a range-based restriction which is
|
|
|
+ * to be supported soon.
|
|
|
+ * Multiple values can be assigned to the same key. One product may have up to
|
|
|
+ * 100 product_labels.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_KeyValue *> *productLabels;
|
|
|
+
|
|
|
+@end
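+
+// Minimal sketch (hypothetical values): a Product with one label attached.
+// At query time, product searches can filter on these labels.
+//
+//   GTLRVision_KeyValue *label = [GTLRVision_KeyValue object];
+//   label.key = @"color";
+//   label.value = @"blue";
+//   GTLRVision_Product *product = [GTLRVision_Product object];
+//   product.displayName = @"Blue dress";
+//   product.productCategory = @"apparel";  // immutable once created
+//   product.productLabels = @[ label ];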
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Parameters for a product search request.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ProductSearchParams : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The bounding polygon around the area of interest in the image.
|
|
|
+ * Optional. If it is not specified, system discretion will be applied.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingPoly;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The filtering expression. This can be used to restrict search results based
|
|
|
+ * on Product labels. We currently support an AND of OR of key-value
|
|
|
+ * expressions, where each expression within an OR must have the same key.
|
|
|
+ * For example, "(color = red OR color = blue) AND brand = Google" is
|
|
|
+ * acceptable, but not "(color = red OR brand = Google)" or "color: red".
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *filter;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The list of product categories to search in. Currently, we only consider
|
|
|
+ * the first category, and either "homegoods", "apparel", or "toys" should be
|
|
|
+ * specified.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<NSString *> *productCategories;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name of a ProductSet to be searched for similar images.
|
|
|
+ * Format is:
|
|
|
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *productSet;
|
|
|
+
|
|
|
+@end
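+
+// Sketch of a search restricted by the filter grammar above (an AND of ORs,
+// with the same key inside each OR); the resource names are hypothetical:
+//
+//   GTLRVision_ProductSearchParams *params = [GTLRVision_ProductSearchParams object];
+//   params.productSet = @"projects/example-project/locations/us-west1/productSets/example-set";
+//   params.productCategories = @[ @"apparel" ];
+//   params.filter = @"(color = red OR color = blue) AND brand = Google";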
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Results for a product search request.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ProductSearchResults : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Timestamp of the index which provided these results. Changes made after
|
|
|
+ * this time are not reflected in the current results.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRDateTime *indexTime;
|
|
|
+
|
|
|
+/**
|
|
|
+ * List of results grouped by products detected in the query image. Each entry
|
|
|
+ * corresponds to one bounding polygon in the query image, and contains the
|
|
|
+ * matching products specific to that region. There may be duplicate product
|
|
|
+ * matches in the union of all the per-product results.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_GroupedResult *> *productGroupedResults;
|
|
|
+
|
|
|
+/** List of results, one for each product match. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Result *> *results;
|
|
|
+
|
|
|
+@end
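+
+// Sketch of walking the per-region grouped results described above
+// (`searchResults` is a hypothetical GTLRVision_ProductSearchResults):
+//
+//   for (GTLRVision_GroupedResult *group in searchResults.productGroupedResults) {
+//     for (GTLRVision_Result *match in group.results) {
+//       NSLog(@"%@ scored %@", match.product.displayName, match.score);
+//     }
+//   }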
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A ProductSet contains Products. A ProductSet can contain a maximum of 1
|
|
|
+ * million reference images. If the limit is exceeded, periodic indexing will
|
|
|
+ * fail.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ProductSet : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The user-provided name for this ProductSet. Must not be empty. Must be at
|
|
|
+ * most 4096 characters long.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *displayName;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Output only. If there was an error with indexing the product set, the field
|
|
|
+ * is populated.
|
|
|
+ * This field is ignored when creating a ProductSet.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_Status *indexError;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Output only. The time at which this ProductSet was last indexed. Query
|
|
|
+ * results will reflect all updates before this time. If this ProductSet has
|
|
|
+ * never been indexed, this timestamp is the default value
|
|
|
+ * "1970-01-01T00:00:00Z".
|
|
|
+ * This field is ignored when creating a ProductSet.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRDateTime *indexTime;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name of the ProductSet.
|
|
|
+ * Format is:
|
|
|
+ * `projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID`.
|
|
|
+ * This field is ignored when creating a ProductSet.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A `Property` consists of a user-supplied name/value pair.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Property : GTLRObject
|
|
|
+
|
|
|
+/** Name of the property. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Value of numeric properties.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of unsignedLongLongValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *uint64Value;
|
|
|
+
|
|
|
+/** Value of the property. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *value;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A `ReferenceImage` represents a product image and its associated metadata,
|
|
|
+ * such as bounding boxes.
|
|
|
+ */
|
|
|
+@interface GTLRVision_ReferenceImage : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Bounding polygons around the areas of interest in the reference image.
|
|
|
+ * Optional. If this field is empty, the system will try to detect regions of
|
|
|
+ * interest. At most 10 bounding polygons will be used.
|
|
|
+ * The provided shape is converted into a non-rotated rectangle. Once
|
|
|
+ * converted, the small edge of the rectangle must be greater than or equal
|
|
|
+ * to 300 pixels. The aspect ratio must be 1:4 or less (i.e. 1:3 is ok; 1:5
|
|
|
+ * is not).
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_BoundingPoly *> *boundingPolys;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name of the reference image.
|
|
|
+ * Format is:
|
|
|
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID/referenceImages/IMAGE_ID`.
|
|
|
+ * This field is ignored when creating a reference image.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *name;
|
|
|
+
|
|
|
+/**
|
|
|
+ * The Google Cloud Storage URI of the reference image.
|
|
|
+ * The URI must start with `gs://`.
|
|
|
+ * Required.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *uri;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Request message for the `RemoveProductFromProductSet` method.
|
|
|
+ */
|
|
|
+@interface GTLRVision_RemoveProductFromProductSetRequest : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name for the Product to be removed from this ProductSet.
|
|
|
+ * Format is:
|
|
|
+ * `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *product;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Information about a product.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Result : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The resource name of the image from the product that is the closest match
|
|
|
+ * to the query.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *image;
|
|
|
+
|
|
|
+/** The Product. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_Product *product;
|
|
|
+
|
|
|
+/**
|
|
|
+ * A confidence level on the match, ranging from 0 (no confidence) to
|
|
|
+ * 1 (full confidence).
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *score;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Set of features pertaining to the image, computed by computer vision
|
|
|
+ * methods over safe-search verticals (for example, adult, spoof, medical,
|
|
|
+ * violence).
|
|
|
+ */
|
|
|
+@interface GTLRVision_SafeSearchAnnotation : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * Represents the adult content likelihood for the image. Adult content may
|
|
|
+ * contain elements such as nudity, pornographic images or cartoons, or
|
|
|
+ * sexual activities.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Adult_Likely It is likely that
|
|
|
+ * the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Adult_Possible It is possible
|
|
|
+ * that the image belongs to the specified vertical. (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Adult_Unknown Unknown likelihood.
|
|
|
+ * (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Adult_Unlikely It is unlikely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Adult_VeryLikely It is very
|
|
|
+ * likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Adult_VeryUnlikely It is very
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *adult;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Likelihood that this is a medical image.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Medical_Likely It is likely that
|
|
|
+ * the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Medical_Possible It is possible
|
|
|
+ * that the image belongs to the specified vertical. (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Medical_Unknown Unknown
|
|
|
+ * likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Medical_Unlikely It is unlikely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Medical_VeryLikely It is very
|
|
|
+ * likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Medical_VeryUnlikely It is very
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *medical;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Likelihood that the request image contains racy content. Racy content may
|
|
|
+ * include (but is not limited to) skimpy or sheer clothing, strategically
|
|
|
+ * covered nudity, lewd or provocative poses, or close-ups of sensitive
|
|
|
+ * body areas.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Racy_Likely It is likely that the
|
|
|
+ * image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Racy_Possible It is possible that
|
|
|
+ * the image belongs to the specified vertical. (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Racy_Unknown Unknown likelihood.
|
|
|
+ * (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Racy_Unlikely It is unlikely that
|
|
|
+ * the image belongs to the specified vertical. (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Racy_VeryLikely It is very likely
|
|
|
+ * that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Racy_VeryUnlikely It is very
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *racy;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Spoof likelihood. The likelihood that a modification
|
|
|
+ * was made to the image's canonical version to make it appear
|
|
|
+ * funny or offensive.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Spoof_Likely It is likely that
|
|
|
+ * the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Spoof_Possible It is possible
|
|
|
+ * that the image belongs to the specified vertical. (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Spoof_Unknown Unknown likelihood.
|
|
|
+ * (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Spoof_Unlikely It is unlikely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Spoof_VeryLikely It is very
|
|
|
+ * likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Spoof_VeryUnlikely It is very
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *spoof;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Likelihood that this image contains violent content.
|
|
|
+ *
|
|
|
+ * Likely values:
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Violence_Likely It is likely that
|
|
|
+ * the image belongs to the specified vertical. (Value: "LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Violence_Possible It is possible
|
|
|
+ * that the image belongs to the specified vertical. (Value: "POSSIBLE")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Violence_Unknown Unknown
|
|
|
+ * likelihood. (Value: "UNKNOWN")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Violence_Unlikely It is unlikely
|
|
|
+ * that the image belongs to the specified vertical. (Value: "UNLIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Violence_VeryLikely It is very
|
|
|
+ * likely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_LIKELY")
|
|
|
+ * @arg @c kGTLRVision_SafeSearchAnnotation_Violence_VeryUnlikely It is very
|
|
|
+ * unlikely that the image belongs to the specified vertical. (Value:
|
|
|
+ * "VERY_UNLIKELY")
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *violence;
|
|
|
+
|
|
|
+@end
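+
+// Sketch of gating on a likelihood value (`annotation` is a hypothetical
+// GTLRVision_SafeSearchAnnotation); the values are plain strings compared
+// against the generated constants:
+//
+//   if ([annotation.adult isEqual:kGTLRVision_SafeSearchAnnotation_Adult_VeryLikely] ||
+//       [annotation.adult isEqual:kGTLRVision_SafeSearchAnnotation_Adult_Likely]) {
+//     // Treat as adult content.
+//   }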
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * The `Status` type defines a logical error model that is suitable for
|
|
|
+ * different
|
|
|
+ * programming environments, including REST APIs and RPC APIs. It is used by
|
|
|
+ * [gRPC](https://github.com/grpc). The error model is designed to be:
|
|
|
+ * - Simple to use and understand for most users
|
|
|
+ * - Flexible enough to meet unexpected needs
|
|
|
+ * # Overview
|
|
|
+ * The `Status` message contains three pieces of data: error code, error
|
|
|
+ * message,
|
|
|
+ * and error details. The error code should be an enum value of
|
|
|
+ * google.rpc.Code, but it may accept additional error codes if needed. The
|
|
|
+ * error message should be a developer-facing English message that helps
|
|
|
+ * developers *understand* and *resolve* the error. If a localized user-facing
|
|
|
+ * error message is needed, put the localized message in the error details or
|
|
|
+ * localize it in the client. The optional error details may contain arbitrary
|
|
|
+ * information about the error. There is a predefined set of error detail types
|
|
|
+ * in the package `google.rpc` that can be used for common error conditions.
|
|
|
+ * # Language mapping
|
|
|
+ * The `Status` message is the logical representation of the error model, but
|
|
|
+ * it
|
|
|
+ * is not necessarily the actual wire format. When the `Status` message is
|
|
|
+ * exposed in different client libraries and different wire protocols, it can
|
|
|
+ * be
|
|
|
+ * mapped differently. For example, it will likely be mapped to some exceptions
|
|
|
+ * in Java, but more likely mapped to some error codes in C.
|
|
|
+ * # Other uses
|
|
|
+ * The error model and the `Status` message can be used in a variety of
|
|
|
+ * environments, either with or without APIs, to provide a
|
|
|
+ * consistent developer experience across different environments.
|
|
|
+ * Example uses of this error model include:
|
|
|
+ * - Partial errors. If a service needs to return partial errors to the client,
|
|
|
+ * it may embed the `Status` in the normal response to indicate the partial
|
|
|
+ * errors.
|
|
|
+ * - Workflow errors. A typical workflow has multiple steps. Each step may
|
|
|
+ * have a `Status` message for error reporting.
|
|
|
+ * - Batch operations. If a client uses batch request and batch response, the
|
|
|
+ * `Status` message should be used directly inside batch response, one for
|
|
|
+ * each error sub-response.
|
|
|
+ * - Asynchronous operations. If an API call embeds asynchronous operation
|
|
|
+ * results in its response, the status of those operations should be
|
|
|
+ * represented directly using the `Status` message.
|
|
|
+ * - Logging. If some API errors are stored in logs, the message `Status` could
|
|
|
+ * be used directly after any stripping needed for security/privacy reasons.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Status : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The status code, which should be an enum value of google.rpc.Code.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *code;
|
|
|
+
|
|
|
+/**
|
|
|
+ * A list of messages that carry the error details. There is a common set of
|
|
|
+ * message types for APIs to use.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Status_Details_Item *> *details;
|
|
|
+
|
|
|
+/**
|
|
|
+ * A developer-facing error message, which should be in English. Any
|
|
|
+ * user-facing error message should be localized and sent in the
|
|
|
+ * google.rpc.Status.details field, or localized by the client.
|
|
|
+ */
|
|
|
+@property(nonatomic, copy, nullable) NSString *message;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * GTLRVision_Status_Details_Item
|
|
|
+ *
|
|
|
+ * @note This class is documented as having more properties of any valid JSON
|
|
|
+ * type. Use @c -additionalJSONKeys and @c -additionalPropertyForName: to
|
|
|
+ * get the list of properties and then fetch them; or @c
|
|
|
+ * -additionalProperties to fetch them all at once.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Status_Details_Item : GTLRObject
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A single symbol representation.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Symbol : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The bounding box for the symbol.
|
|
|
+ * The vertices are in the order of top-left, top-right, bottom-right,
|
|
|
+ * bottom-left. When a rotation of the bounding box is detected the rotation
|
|
|
+ * is represented as around the top-left corner as defined when the text is
|
|
|
+ * read in the 'natural' orientation.
|
|
|
+ * For example:
|
|
|
+ * * when the text is horizontal it might look like:
|
|
|
+ * 0----1
|
|
|
+ * | |
|
|
|
+ * 3----2
|
|
|
+ * * when it's rotated 180 degrees around the top-left corner it becomes:
|
|
|
+ * 2----3
|
|
|
+ * | |
|
|
|
+ * 1----0
|
|
|
+ * and the vertex order will still be (0, 1, 2, 3).
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingBox;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Confidence of the OCR results for the symbol. Range [0, 1].
|
|
|
+ *
|
|
|
+ * Uses NSNumber of floatValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *confidence;
|
|
|
+
|
|
|
+/** Additional information detected for the symbol. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_TextProperty *property;
|
|
|
+
|
|
|
+/** The actual UTF-8 representation of the symbol. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *text;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * TextAnnotation contains a structured representation of OCR extracted text.
|
|
|
+ * The hierarchy of an OCR extracted text structure is like this:
|
|
|
+ * TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
|
|
|
+ * Each structural component, starting from Page, may further have its own
|
|
|
+ * properties. Properties describe detected languages, breaks, etc. Please
|
|
|
+ * refer
|
|
|
+ * to the TextAnnotation.TextProperty message definition below for more
|
|
|
+ * detail.
|
|
|
+ */
|
|
|
+@interface GTLRVision_TextAnnotation : GTLRObject
|
|
|
+
|
|
|
+/** List of pages detected by OCR. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Page *> *pages;
|
|
|
+
|
|
|
+/** UTF-8 text detected on the pages. */
|
|
|
+@property(nonatomic, copy, nullable) NSString *text;
|
|
|
+
|
|
|
+@end
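+
+// Sketch of the Page -> Block -> Paragraph -> Word -> Symbol hierarchy noted
+// above, reassembling text symbol by symbol (`annotation` is a hypothetical
+// GTLRVision_TextAnnotation):
+//
+//   NSMutableString *assembled = [NSMutableString string];
+//   for (GTLRVision_Page *page in annotation.pages) {
+//     for (GTLRVision_Block *block in page.blocks) {
+//       for (GTLRVision_Paragraph *paragraph in block.paragraphs) {
+//         for (GTLRVision_Word *word in paragraph.words) {
+//           for (GTLRVision_Symbol *symbol in word.symbols) {
+//             [assembled appendString:symbol.text];
+//           }
+//         }
+//       }
+//     }
+//   }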
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Additional information detected on the structural component.
|
|
|
+ */
|
|
|
+@interface GTLRVision_TextProperty : GTLRObject
|
|
|
+
|
|
|
+/** Detected start or end of a text segment. */
|
|
|
+@property(nonatomic, strong, nullable) GTLRVision_DetectedBreak *detectedBreak;
|
|
|
+
|
|
|
+/** A list of detected languages together with confidence. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_DetectedLanguage *> *detectedLanguages;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * A vertex represents a 2D point in the image.
|
|
|
+ * NOTE: the vertex coordinates are in the same scale as the original image.
|
|
|
+ */
|
|
|
+@interface GTLRVision_Vertex : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * X coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *x;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Y coordinate.
|
|
|
+ *
|
|
|
+ * Uses NSNumber of intValue.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSNumber *y;
|
|
|
+
|
|
|
+@end
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * Relevant information for the image from the Internet.
|
|
|
+ */
|
|
|
+@interface GTLRVision_WebDetection : GTLRObject
|
|
|
+
|
|
|
+/**
|
|
|
+ * The service's best guess as to the topic of the request image.
|
|
|
+ * Inferred from similar images on the open web.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_WebLabel *> *bestGuessLabels;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Fully matching images from the Internet.
|
|
|
+ * Can include resized copies of the query image.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_WebImage *> *fullMatchingImages;
|
|
|
+
|
|
|
+/** Web pages containing the matching images from the Internet. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_WebPage *> *pagesWithMatchingImages;
|
|
|
+
|
|
|
+/**
|
|
|
+ * Partial matching images from the Internet.
|
|
|
+ * Those images are similar enough to share some key-point features. For
|
|
|
+ * example, an original image will likely have partial matching for its crops.
|
|
|
+ */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_WebImage *> *partialMatchingImages;
|
|
|
+
|
|
|
+/** The visually similar image results. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_WebImage *> *visuallySimilarImages;
|
|
|
+
|
|
|
+/** Deduced entities from similar images on the Internet. */
|
|
|
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_WebEntity *> *webEntities;
|
|
|
+
|
|
|
+@end
+
+
+/**
+ * Parameters for a web detection request.
+ */
+@interface GTLRVision_WebDetectionParams : GTLRObject
+
+/**
+ * Whether to include results derived from the geo information in the image.
+ *
+ * Uses NSNumber of boolValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *includeGeoResults;
+
+@end
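+
+// Usage sketch (illustrative only): enabling geo-derived results for web
+// detection via the image context of an annotate request, where `request`
+// is a GTLRVision_AnnotateImageRequest being assembled by the caller:
+//
+//   GTLRVision_WebDetectionParams *params = [GTLRVision_WebDetectionParams object];
+//   params.includeGeoResults = @YES;
+//   GTLRVision_ImageContext *context = [GTLRVision_ImageContext object];
+//   context.webDetectionParams = params;
+//   request.imageContext = context;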
+
+
+/**
+ * Entity deduced from similar images on the Internet.
+ */
+@interface GTLRVision_WebEntity : GTLRObject
+
+/**
+ * Canonical description of the entity, in English.
+ *
+ * Remapped to 'descriptionProperty' to avoid NSObject's 'description'.
+ */
+@property(nonatomic, copy, nullable) NSString *descriptionProperty;
+
+/** Opaque entity ID. */
+@property(nonatomic, copy, nullable) NSString *entityId;
+
+/**
+ * Overall relevancy score for the entity.
+ * Not normalized and not comparable across different image queries.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+@end
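+
+// Usage sketch (illustrative only): because entity scores are not
+// comparable across different image queries, ranking should stay within a
+// single response. Sorting one response's entities by descending score:
+//
+//   NSArray<GTLRVision_WebEntity *> *ranked =
+//       [webDetection.webEntities sortedArrayUsingComparator:
+//           ^NSComparisonResult(GTLRVision_WebEntity *a, GTLRVision_WebEntity *b) {
+//     return [b.score compare:a.score];
+//   }];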
+
+
+/**
+ * Metadata for online images.
+ */
+@interface GTLRVision_WebImage : GTLRObject
+
+/**
+ * (Deprecated) Overall relevancy score for the image.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+/** The result image URL. */
+@property(nonatomic, copy, nullable) NSString *url;
+
+@end
+
+
+/**
+ * Label to provide extra metadata for the web detection.
+ */
+@interface GTLRVision_WebLabel : GTLRObject
+
+/** Label for extra metadata. */
+@property(nonatomic, copy, nullable) NSString *label;
+
+/**
+ * The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+ * For more information, see
+ * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+ */
+@property(nonatomic, copy, nullable) NSString *languageCode;
+
+@end
+
+
+/**
+ * Metadata for web pages.
+ */
+@interface GTLRVision_WebPage : GTLRObject
+
+/**
+ * Fully matching images on the page.
+ * Can include resized copies of the query image.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_WebImage *> *fullMatchingImages;
+
+/** Title for the web page; may contain HTML markup. */
+@property(nonatomic, copy, nullable) NSString *pageTitle;
+
+/**
+ * Partial matching images on the page.
+ * Those images are similar enough to share some key-point features. For
+ * example, an original image will likely have partial matching for its crops.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_WebImage *> *partialMatchingImages;
+
+/**
+ * (Deprecated) Overall relevancy score for the web page.
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *score;
+
+/** The result web page URL. */
+@property(nonatomic, copy, nullable) NSString *url;
+
+@end
+
+
+/**
+ * A word representation.
+ */
+@interface GTLRVision_Word : GTLRObject
+
+/**
+ * The bounding box for the word.
+ * The vertices are in the order of top-left, top-right, bottom-right,
+ * bottom-left. When a rotation of the bounding box is detected, the rotation
+ * is represented as around the top-left corner as defined when the text is
+ * read in the 'natural' orientation.
+ * For example:
+ *   * when the text is horizontal it might look like:
+ *        0----1
+ *        |    |
+ *        3----2
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *        2----3
+ *        |    |
+ *        1----0
+ *   and the vertex order will still be (0, 1, 2, 3).
+ */
+@property(nonatomic, strong, nullable) GTLRVision_BoundingPoly *boundingBox;
+
+/**
+ * Confidence of the OCR results for the word. Range [0, 1].
+ *
+ * Uses NSNumber of floatValue.
+ */
+@property(nonatomic, strong, nullable) NSNumber *confidence;
+
+/** Additional information detected for the word. */
+@property(nonatomic, strong, nullable) GTLRVision_TextProperty *property;
+
+/**
+ * List of symbols in the word.
+ * The order of the symbols follows the natural reading order.
+ */
+@property(nonatomic, strong, nullable) NSArray<GTLRVision_Symbol *> *symbols;
+
+@end
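+
+// Usage sketch (illustrative only): reconstructing a word's text by
+// concatenating its symbols in natural reading order, and reading the
+// word-level OCR confidence:
+//
+//   NSMutableString *text = [NSMutableString string];
+//   for (GTLRVision_Symbol *symbol in word.symbols) {
+//     [text appendString:symbol.text ?: @""];
+//   }
+//   NSLog(@"Word '%@' (confidence %@)", text, word.confidence);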
+
+NS_ASSUME_NONNULL_END
+
+#pragma clang diagnostic pop