@@ -12,11 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
+import atexit
 import logging
 
-import requests
-
 from opencensus.common import utils as common_utils
 from opencensus.ext.azure.common import Options, utils
 from opencensus.ext.azure.common.processor import ProcessorMixin
@@ -26,6 +24,8 @@
     Envelope,
     MetricData,
 )
+from opencensus.ext.azure.common.storage import LocalFileStorage
+from opencensus.ext.azure.common.transport import TransportMixin
 from opencensus.ext.azure.metrics_exporter import standard_metrics
 from opencensus.metrics import transport
 from opencensus.metrics.export.metric_descriptor import MetricDescriptorType
@@ -36,52 +36,67 @@
 logger = logging.getLogger(__name__)
 
 
-class MetricsExporter(ProcessorMixin):
+class MetricsExporter(TransportMixin, ProcessorMixin):
     """Metrics exporter for Microsoft Azure Monitor."""
 
-    def __init__(self, options=None):
-        if options is None:
-            options = Options()
-        self.options = options
+    def __init__(self, **options):
+        self.options = Options(**options)
         utils.validate_instrumentation_key(self.options.instrumentation_key)
         if self.options.max_batch_size <= 0:
             raise ValueError('Max batch size must be at least 1.')
+        self.export_interval = self.options.export_interval
         self.max_batch_size = self.options.max_batch_size
         self._telemetry_processors = []
+        self.storage = LocalFileStorage(
+            path=self.options.storage_path,
+            max_size=self.options.storage_max_size,
+            maintenance_period=self.options.storage_maintenance_period,
+            retention_period=self.options.storage_retention_period,
+        )
         super(MetricsExporter, self).__init__()
 
     def export_metrics(self, metrics):
-        if metrics:
-            envelopes = []
-            for metric in metrics:
-                # No support for histogram aggregations
-                type_ = metric.descriptor.type
-                if type_ != MetricDescriptorType.CUMULATIVE_DISTRIBUTION:
-                    md = metric.descriptor
-                    # Each time series will be uniquely identified by its
-                    # label values
-                    for time_series in metric.time_series:
-                        # Using stats, time_series should only have one point
-                        # which contains the aggregated value
-                        data_point = self.create_data_points(
-                            time_series, md)[0]
-                        # The timestamp is when the metric was recorded
-                        time_stamp = time_series.points[0].timestamp
-                        # Get the properties using label keys from metric and
-                        # label values of the time series
-                        properties = self.create_properties(time_series, md)
-                        envelopes.append(self.create_envelope(data_point,
-                                                              time_stamp,
-                                                              properties))
-            # Send data in batches of max_batch_size
-            if envelopes:
-                batched_envelopes = list(common_utils.window(
-                    envelopes, self.max_batch_size))
-                for batch in batched_envelopes:
-                    batch = self.apply_telemetry_processors(batch)
-                    self._transmit_without_retry(batch)
-
-    def create_data_points(self, time_series, metric_descriptor):
+        envelopes = []
+        for metric in metrics:
+            envelopes.extend(self.metric_to_envelopes(metric))
+        # Send data in batches of max_batch_size
+        batched_envelopes = list(common_utils.window(
+            envelopes, self.max_batch_size))
+        for batch in batched_envelopes:
+            batch = self.apply_telemetry_processors(batch)
+            result = self._transmit(batch)
+            if result > 0:
+                self.storage.put(batch, result)
+
+        # If there is still room to transmit envelopes, transmit from storage
+        # if available
+        if len(envelopes) < self.options.max_batch_size:
+            self._transmit_from_storage()
+
+    def metric_to_envelopes(self, metric):
+        envelopes = []
+        # No support for histogram aggregations
+        if (metric.descriptor.type !=
+                MetricDescriptorType.CUMULATIVE_DISTRIBUTION):
+            md = metric.descriptor
+            # Each time series will be uniquely identified by its
+            # label values
+            for time_series in metric.time_series:
+                # Using stats, time_series should only have one
+                # point which contains the aggregated value
+                data_point = self._create_data_points(
+                    time_series, md)[0]
+                # The timestamp is when the metric was recorded
+                timestamp = time_series.points[0].timestamp
+                # Get the properties using label keys from metric
+                # and label values of the time series
+                properties = self._create_properties(time_series, md)
+                envelopes.append(self._create_envelope(data_point,
+                                                       timestamp,
+                                                       properties))
+        return envelopes
+
+    def _create_data_points(self, time_series, metric_descriptor):
         """Convert a metric's OC time series to list of Azure data points."""
         data_points = []
         for point in time_series.points:
@@ -92,7 +107,7 @@ def create_data_points(self, time_series, metric_descriptor):
             data_points.append(data_point)
         return data_points
 
-    def create_properties(self, time_series, metric_descriptor):
+    def _create_properties(self, time_series, metric_descriptor):
         properties = {}
         # We construct a properties map from the label keys and values. We
         # assume the ordering is already correct
@@ -104,11 +119,11 @@ def create_properties(self, time_series, metric_descriptor):
             properties[metric_descriptor.label_keys[i].key] = value
         return properties
 
-    def create_envelope(self, data_point, time_stamp, properties):
+    def _create_envelope(self, data_point, timestamp, properties):
         envelope = Envelope(
             iKey=self.options.instrumentation_key,
             tags=dict(utils.azure_monitor_context),
-            time=time_stamp.isoformat(),
+            time=timestamp.isoformat(),
         )
         envelope.name = "Microsoft.ApplicationInsights.Metric"
         data = MetricData(
@@ -118,125 +133,14 @@ def create_envelope(self, data_point, time_stamp, properties):
         envelope.data = Data(baseData=data, baseType="MetricData")
         return envelope
 
-    def _transmit_without_retry(self, envelopes):
-        # Contains logic from transport._transmit
-        # TODO: Remove this function from exporter and consolidate with
-        # transport._transmit to cover all exporter use cases. Uses cases
-        # pertain to properly handling failures and implementing a retry
-        # policy for this exporter.
-        # TODO: implement retry policy
-        """
-        Transmit the data envelopes to the ingestion service.
-        Does not perform retry logic. For partial success and
-        non-retryable failure, simply outputs result to logs.
-        This function should never throw exception.
-        """
-        try:
-            response = requests.post(
-                url=self.options.endpoint,
-                data=json.dumps(envelopes),
-                headers={
-                    'Accept': 'application/json',
-                    'Content-Type': 'application/json; charset=utf-8',
-                },
-                timeout=self.options.timeout,
-            )
-        except Exception as ex:
-            # No retry policy, log output
-            logger.warning('Transient client side error %s.', ex)
-            return
-
-        text = 'N/A'
-        data = None
-        # Handle the possible results from the response
-        if response is None:
-            logger.warning('Error: cannot read response.')
-            return
-        try:
-            status_code = response.status_code
-        except Exception as ex:
-            logger.warning('Error while reading response status code %s.', ex)
-            return
-        try:
-            text = response.text
-        except Exception as ex:
-            logger.warning('Error while reading response body %s.', ex)
-            return
-        try:
-            data = json.loads(text)
-        except Exception as ex:
-            logger.warning('Error while loading ' +
-                           'json from response body %s.', ex)
-            return
-        if status_code == 200:
-            logger.info('Transmission succeeded: %s.', text)
-            return
-        # Check for retryable partial content
-        if status_code == 206:
-            if data:
-                try:
-                    retryable_envelopes = []
-                    for error in data['errors']:
-                        if error['statusCode'] in (
-                            429,  # Too Many Requests
-                            500,  # Internal Server Error
-                            503,  # Service Unavailable
-                        ):
-                            retryable_envelopes.append(
-                                envelopes[error['index']])
-                        else:
-                            logger.error(
-                                'Data drop %s: %s %s.',
-                                error['statusCode'],
-                                error['message'],
-                                envelopes[error['index']],
-                            )
-                    # show the envelopes that can be retried manually for
-                    # visibility
-                    if retryable_envelopes:
-                        logger.warning(
-                            'Error while processing data. Data dropped. ' +
-                            'Consider manually retrying for envelopes: %s.',
-                            retryable_envelopes
-                        )
-                    return
-                except Exception:
-                    logger.exception(
-                        'Error while processing %s: %s.',
-                        status_code,
-                        text
-                    )
-                    return
-        # Check for non-retryable result
-        if status_code in (
-            206,  # Partial Content
-            429,  # Too Many Requests
-            500,  # Internal Server Error
-            503,  # Service Unavailable
-        ):
-            # server side error (retryable)
-            logger.warning(
-                'Transient server side error %s: %s. ' +
-                'Consider manually trying.',
-                status_code,
-                text,
-            )
-        else:
-            # server side error (non-retryable)
-            logger.error(
-                'Non-retryable server side error %s: %s.',
-                status_code,
-                text,
-            )
-
 
 def new_metrics_exporter(**options):
-    options_ = Options(**options)
-    exporter = MetricsExporter(options=options_)
+    exporter = MetricsExporter(**options)
     producers = [stats_module.stats]
-    if options_.enable_standard_metrics:
+    if exporter.options.enable_standard_metrics:
         producers.append(standard_metrics.producer)
     transport.get_exporter_thread(producers,
                                   exporter,
-                                  interval=options_.export_interval)
+                                  interval=exporter.options.export_interval)
+    atexit.register(exporter.export_metrics, stats_module.stats.get_metrics())
     return exporter
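
For orientation, here is a minimal usage sketch of the reworked factory, assuming the standard opencensus stats API; the measure and view names and the key value are placeholders, not part of this commit:

from opencensus.ext.azure import metrics_exporter
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module

stats = stats_module.stats
view_manager = stats.view_manager
stats_recorder = stats.stats_recorder

# Hypothetical measure and view, used only for this sketch.
CARROTS_MEASURE = measure_module.MeasureInt('carrots', 'number of carrots',
                                            'carrots')
CARROTS_VIEW = view_module.View('carrots_view', 'number of carrots', [],
                                CARROTS_MEASURE,
                                aggregation_module.CountAggregation())
view_manager.register_view(CARROTS_VIEW)

# Placeholder key. Options are now passed as kwargs straight through to
# Options(**options); the factory itself starts the exporter thread and
# registers the atexit flush added in this commit.
exporter = metrics_exporter.new_metrics_exporter(
    instrumentation_key='00000000-0000-0000-0000-000000000000')

mmap = stats_recorder.new_measurement_map()
mmap.measure_int_put(CARROTS_MEASURE, 1000)
mmap.record(tag_map_module.TagMap())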
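The new storage fallback in export_metrics relies on a convention from TransportMixin that this diff does not show; as a labeled assumption, _transmit is taken to return a positive number of seconds to back off when a batch is retryable, and a value of zero or less otherwise. A simplified sketch of that contract:

# Sketch only; the real logic lives in TransportMixin._transmit and
# LocalFileStorage, neither of which is shown in this diff.
def send_batch(exporter, batch):
    result = exporter._transmit(batch)
    if result > 0:
        # Retryable failure: park the batch on disk. The second argument is
        # assumed to act as a lease period so the envelopes are not retried
        # before the back-off elapses; _transmit_from_storage picks them up
        # on a later, under-filled export cycle.
        exporter.storage.put(batch, result)
    # result <= 0: the batch either succeeded or was dropped as
    # non-retryable.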