arduino-audio-tools
AudioClientRTSP.h
1 
2 #pragma once
3 
20 // Copyright (c) 1996-2023, Live Networks, Inc. All rights reserved
21 // A demo application, showing how to create and run a RTSP client (that can
22 // potentially receive multiple streams concurrently).
23 //
24 
25 #include "Print.h" // Arduino Print
26 // include live555
27 #include "BasicUsageEnvironment.hh"
28 //#include "liveMedia.hh"
29 #include "RTSPClient.hh"
30 
31 // By default, we request that the server stream its data using RTP/UDP.
32 // If, instead, you want to request that the server stream via RTP-over-TCP,
33 // change the following to True:
34 #define REQUEST_STREAMING_OVER_TCP false
35 
36 // by default, print verbose output from each "RTSPClient"
37 #define RTSP_CLIENT_VERBOSITY_LEVEL 1
38 // Even though we're not going to be doing anything with the incoming data, we
39 // still need to receive it. Define the size of the buffer that we'll use:
40 #define RTSP_SINK_BUFFER_SIZE 1024
41 
42 // If you don't want to see debugging output for each received frame, then
43 // comment out the following line:
44 #undef DEBUG_PRINT_EACH_RECEIVED_FRAME
45 #define DEBUG_PRINT_EACH_RECEIVED_FRAME 0
46 
namespace audiotools_rtsp {

class OurRTSPClient;
// The main streaming routine (for each "rtsp://" URL):
OurRTSPClient* openURL(UsageEnvironment& env, char const* progName, char const* rtspURL);
// Counts how many streams (i.e., "RTSPClient"s) are currently in use.
static unsigned rtspClientCount = 0;
// Set to a non-zero value to make the LIVE555 event loop (and loop()) stop.
static char rtspEventLoopWatchVariable = 0;
// Destination for received frame payloads (set by AudioClientRTSP::begin()).
static Print* rtspOutput = nullptr;
// Size of the receive buffer allocated by each sink instance.
static uint32_t rtspSinkReceiveBufferSize = 0;
// Request RTP-over-TCP instead of RTP/UDP when true.
static bool rtspUseTCP = REQUEST_STREAMING_OVER_TCP;
// NOTE(review): these are `static` variables in a header, so every translation
// unit including this file gets its own copy. Fine for a single-sketch Arduino
// build; consider C++17 `inline` variables if this header is included from
// multiple translation units.

} // namespace audiotools_rtsp
61 
62 namespace audio_tools {
63 
71  public:
72  AudioClientRTSP(uint32_t receiveBufferSize = RTSP_SINK_BUFFER_SIZE, bool useTCP=REQUEST_STREAMING_OVER_TCP, bool blocking = false) {
73  setBufferSize(receiveBufferSize);
74  useTCP ? setTCP() : setUDP();
75  setBlocking(blocking);
76  }
77 
  /// Defines the size of the receive buffer allocated for each RTSP sink.
  void setBufferSize(int size){
    audiotools_rtsp::rtspSinkReceiveBufferSize = size;
  }
81 
  /// Requests the server to stream via RTP-over-TCP.
  void setTCP(){
    audiotools_rtsp::rtspUseTCP = true;
  }
85 
  /// Requests the server to stream via RTP/UDP (the default).
  void setUDP(){
    audiotools_rtsp::rtspUseTCP = false;
  }
89 
  /// When true, begin() runs the LIVE555 event loop and does not return;
  /// when false, call loop() repeatedly from the Arduino loop().
  void setBlocking(bool flag){
    is_blocking = flag;
  }
93 
  /// Optional WiFi credentials used by login(); you can also just start
  /// WiFi yourself before calling begin().
  void setLogin(const char* ssid, const char* password){
    this->ssid = ssid;
    this->password = password;
  }
99 
101  bool begin(const char* url, Print &out) {
102  audiotools_rtsp::rtspOutput = &out;
103  if (url==nullptr) {
104  return false;
105  }
106  if (!login()){
107  LOGE("wifi down");
108  return false;
109  }
110  // Begin by setting up our usage environment:
111  scheduler = BasicTaskScheduler::createNew();
112  env = BasicUsageEnvironment::createNew(*scheduler);
113 
114  // There are argc-1 URLs: argv[1] through argv[argc-1]. Open and start
115  // streaming each one:
116  rtsp_client = audiotools_rtsp::openURL(*env, "RTSPClient", url);
117 
118  // All subsequent activity takes place within the event loop:
119  if (is_blocking) env->taskScheduler().doEventLoop(&audiotools_rtsp::rtspEventLoopWatchVariable);
120  // This function call does not return, unless, at some point in time,
121  // "rtspEventLoopWatchVariable" gets set to something non-zero.
122 
123  return true;
124  }
125 
127  void loop() {
128  if (audiotools_rtsp::rtspEventLoopWatchVariable==0) scheduler->SingleStep();
129  }
130 
131  void end() {
132  audiotools_rtsp::rtspEventLoopWatchVariable = 1;
133  env->reclaim();
134  env = NULL;
135  delete scheduler;
136  scheduler = NULL;
137  bool is_blocking = false;
138  }
139 
141  return rtsp_client;
142  }
143 
144  protected:
145  audiotools_rtsp::OurRTSPClient* rtsp_client;
146  UsageEnvironment* env=nullptr;
147  BasicTaskScheduler* scheduler=nullptr;
148  const char* ssid=nullptr;
149  const char* password = nullptr;
150  bool is_blocking = false;
151 
153  bool login(){
154  if(WiFi.status() != WL_CONNECTED && ssid!=nullptr && password!=nullptr){
155  WiFi.mode(WIFI_STA);
156  WiFi.begin(ssid, password);
157  while(WiFi.status() != WL_CONNECTED){
158  Serial.print(".");
159  delay(100);
160  }
161  Serial.println();
162  Serial.print("Local Address: ");
163  Serial.println(WiFi.localIP());
164  }
165  return WiFi.status() == WL_CONNECTED;
166  }
167 
168 
169 };
170 
171 } // namespace audio_tools
172 
namespace audiotools_rtsp {
// Implementation part: per-stream state, the RTSP client subclass, the data
// sink, and the asynchronous RTSP response/event handlers.

// Forward function declarations:

// RTSP 'response handlers':
void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode,
                           char* resultString);
void continueAfterSETUP(RTSPClient* rtspClient, int resultCode,
                        char* resultString);
void continueAfterPLAY(RTSPClient* rtspClient, int resultCode,
                       char* resultString);

// Other event handler functions:
void subsessionAfterPlaying(
    void* clientData); // called when a stream's subsession (e.g., audio or
                       // video substream) ends
void subsessionByeHandler(void* clientData, char const* reason);
// called when a RTCP "BYE" is received for a subsession
void streamTimerHandler(void* clientData);
// called at the end of a stream's expected duration (if the stream has not
// already signaled its end using a RTCP "BYE")

// Used to iterate through each stream's 'subsessions', setting up each one:
void setupNextSubsession(RTSPClient* rtspClient);

// Used to shut down and close a stream (including its "RTSPClient" object):
void shutdownStream(RTSPClient* rtspClient, int exitCode = 1);
202 
203 // A function that outputs a string that identifies each stream (for debugging
204 // output). Modify this if you wish:
// Prints the client's URL as a prefix for debugging output.
UsageEnvironment& operator<<(UsageEnvironment& env,
                             const RTSPClient& rtspClient) {
  return env << "[URL:\"" << rtspClient.url() << "\"]: ";
}
209 
210 // A function that outputs a string that identifies each subsession (for
211 // debugging output). Modify this if you wish:
// Prints "medium/codec" (e.g. "audio/MPA") to identify a subsession in
// debugging output.
UsageEnvironment& operator<<(UsageEnvironment& env,
                             const MediaSubsession& subsession) {
  return env << subsession.mediumName() << "/" << subsession.codecName();
}
216 
 public:
  virtual ~StreamClientState();

 public:
  // Per-stream state kept for the whole lifetime of one RTSP stream.
  MediaSubsessionIterator* iter;  // iterates the session's subsessions during setup
  MediaSession* session;          // the media session created from the SDP description
  MediaSubsession* subsession;    // the subsession currently being set up
  TaskToken streamTimerTask;      // duration timer (fires streamTimerHandler)
  double duration;                // expected play duration in seconds (0 if unknown)
};
229 
230 // If you're streaming just a single stream (i.e., just from a single URL,
231 // once), then you can define and use just a single "StreamClientState"
232 // structure, as a global variable in your application. However, because - in
233 // this demo application - we're showing how to play multiple streams,
234 // concurrently, we can't do that. Instead, we have to have a separate
235 // "StreamClientState" structure for each "RTSPClient". To do this, we subclass
236 // "RTSPClient", and add a "StreamClientState" field to the subclass:
237 
// RTSPClient subclass that carries its own StreamClientState, so several
// streams can be received concurrently (one state per client).
class OurRTSPClient : public RTSPClient {
 public:
  static OurRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL,
                                  int verbosityLevel = 0,
                                  char const* applicationName = NULL,
                                  portNumBits tunnelOverHTTPPortNum = 0);

 protected:
  OurRTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel,
                char const* applicationName, portNumBits tunnelOverHTTPPortNum);
  // called only by createNew();
  virtual ~OurRTSPClient();

 public:
  StreamClientState scs;  // per-stream state (session, subsession iterator, timer, ...)
};
254 
255 // Define a data sink (a subclass of "MediaSink") to receive the data for each
256 // subsession (i.e., each audio or video 'substream'). In practice, this might
257 // be a class (or a chain of classes) that decodes and then renders the incoming
258 // audio or video. Or it might be a "FileSink", for outputting the received data
259 // into a file (as is done by the "openRTSP" application). In this example code,
260 // however, we define a simple 'dummy' sink that receives incoming data, but
261 // does nothing with it.
262 
// A "MediaSink" subclass that receives the data of one subsession (audio or
// video 'substream') and forwards the raw frame payload to the global
// rtspOutput (an Arduino Print) — see afterGettingFrame().
class OurSink : public MediaSink {
 public:
  static OurSink* createNew(
      UsageEnvironment& env,
      MediaSubsession&
          subsession, // identifies the kind of data that's being received
      char const* streamId = NULL); // identifies the stream itself (optional)

 private:
  OurSink(UsageEnvironment& env, MediaSubsession& subsession,
          char const* streamId);
  // called only by "createNew()"
  virtual ~OurSink();

  // Static trampoline + member implementation, invoked for each received frame:
  static void afterGettingFrame(void* clientData, unsigned frameSize,
                                unsigned numTruncatedBytes,
                                struct timeval presentationTime,
                                unsigned durationInMicroseconds);
  void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                         struct timeval presentationTime,
                         unsigned durationInMicroseconds);

 private:
  // redefined virtual functions:
  virtual Boolean continuePlaying();

 private:
  u_int8_t* fReceiveBuffer;      // allocated with size rtspSinkReceiveBufferSize
  MediaSubsession& fSubsession;  // the subsession this sink receives
  char* fStreamId;               // copy of the stream id, used in debug output
};
294 
295 OurRTSPClient* openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) {
296  // Begin by creating a "RTSPClient" object. Note that there is a separate
297  // "RTSPClient" object for each stream that we wish to receive (even if more
298  // than stream uses the same "rtsp://" URL).
299  OurRTSPClient* rtspClient = OurRTSPClient::createNew(
300  env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName);
301  if (rtspClient == NULL) {
302  env << "Failed to create a RTSP client for URL \"" << rtspURL
303  << "\": " << env.getResultMsg() << "\n";
304  return nullptr;
305  }
306 
307  ++rtspClientCount;
308 
309  // Next, send a RTSP "DESCRIBE" command, to get a SDP description for the
310  // stream. Note that this command - like all RTSP commands - is sent
311  // asynchronously; we do not block, waiting for a response. Instead, the
312  // following function call returns immediately, and we handle the RTSP
313  // response later, from within the event loop:
314  rtspClient->sendDescribeCommand(continueAfterDESCRIBE);
315  return rtspClient;
316 }
317 
318 // Implementation of the RTSP 'response handlers':
319 
// Handles the response to the "DESCRIBE" command: parses the returned SDP
// description into a MediaSession and starts setting up its subsessions.
// Any failure falls out of the do/while(0) and shuts the stream down.
void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode,
                           char* resultString) {
  do {
    UsageEnvironment& env = rtspClient->envir();                 // alias
    StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs;  // alias

    if (resultCode != 0) {
      env << *rtspClient << "Failed to get a SDP description: " << resultString
          << "\n";
      delete[] resultString;
      break;
    }

    // On success, resultString IS the SDP description (we own it).
    char* const sdpDescription = resultString;
    env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n";

    // Create a media session object from this SDP description:
    scs.session = MediaSession::createNew(env, sdpDescription);
    delete[] sdpDescription; // because we don't need it anymore
    if (scs.session == NULL) {
      env << *rtspClient
          << "Failed to create a MediaSession object from the SDP description: "
          << env.getResultMsg() << "\n";
      break;
    } else if (!scs.session->hasSubsessions()) {
      env << *rtspClient
          << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
      break;
    }

    // Then, create and set up our data source objects for the session. We do
    // this by iterating over the session's 'subsessions', calling
    // "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command,
    // on each one. (Each 'subsession' will have its own data source.)
    scs.iter = new MediaSubsessionIterator(*scs.session);
    setupNextSubsession(rtspClient);
    return;
  } while (0);

  // An unrecoverable error occurred with this stream.
  shutdownStream(rtspClient);
}
362 
// Sets up the next subsession from the iterator (sending a "SETUP" for it);
// failed subsessions are skipped by recursing to the next one. Once every
// subsession has been handled, sends the "PLAY" command to start streaming.
void setupNextSubsession(RTSPClient* rtspClient) {
  UsageEnvironment& env = rtspClient->envir();                 // alias
  StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs;  // alias

  scs.subsession = scs.iter->next();
  if (scs.subsession != NULL) {
    if (!scs.subsession->initiate()) {
      env << *rtspClient << "Failed to initiate the \"" << *scs.subsession
          << "\" subsession: " << env.getResultMsg() << "\n";
      setupNextSubsession(
          rtspClient); // give up on this subsession; go to the next one
    } else {
      env << *rtspClient << "Initiated the \"" << *scs.subsession
          << "\" subsession (";
      if (scs.subsession->rtcpIsMuxed()) {
        env << "client port " << scs.subsession->clientPortNum();
      } else {
        // RTP and RTCP use consecutive ports.
        env << "client ports " << scs.subsession->clientPortNum() << "-"
            << scs.subsession->clientPortNum() + 1;
      }
      env << ")\n";

      // Continue setting up this subsession, by sending a RTSP "SETUP" command:
      rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False,
                                   rtspUseTCP);
    }
    return;
  }

  // We've finished setting up all of the subsessions. Now, send a RTSP "PLAY"
  // command to start the streaming:
  if (scs.session->absStartTime() != NULL) {
    // Special case: The stream is indexed by 'absolute' time, so send an
    // appropriate "PLAY" command:
    rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY,
                                scs.session->absStartTime(),
                                scs.session->absEndTime());
  } else {
    scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
    rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
  }
}
405 
// Handles the response to a "SETUP" command: creates an OurSink for the
// subsession, starts playing into it, registers a RTCP "BYE" handler, then
// continues with the next subsession (regardless of success here).
void continueAfterSETUP(RTSPClient* rtspClient, int resultCode,
                        char* resultString) {
  do {
    UsageEnvironment& env = rtspClient->envir();                 // alias
    StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs;  // alias

    if (resultCode != 0) {
      env << *rtspClient << "Failed to set up the \"" << *scs.subsession
          << "\" subsession: " << resultString << "\n";
      break;
    }

    env << *rtspClient << "Set up the \"" << *scs.subsession
        << "\" subsession (";
    if (scs.subsession->rtcpIsMuxed()) {
      env << "client port " << scs.subsession->clientPortNum();
    } else {
      env << "client ports " << scs.subsession->clientPortNum() << "-"
          << scs.subsession->clientPortNum() + 1;
    }
    env << ")\n";

    // Having successfully setup the subsession, create a data sink for it, and
    // call "startPlaying()" on it. (This will prepare the data sink to receive
    // data; the actual flow of data from the client won't start happening until
    // later, after we've sent a RTSP "PLAY" command.)

    scs.subsession->sink =
        OurSink::createNew(env, *scs.subsession, rtspClient->url());
    // perhaps use your own custom "MediaSink" subclass instead
    if (scs.subsession->sink == NULL) {
      env << *rtspClient << "Failed to create a data sink for the \""
          << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n";
      break;
    }

    env << *rtspClient << "Created a data sink for the \"" << *scs.subsession
        << "\" subsession\n";
    scs.subsession->miscPtr =
        rtspClient; // a hack to let subsession handler functions get the
                    // "RTSPClient" from the subsession
    scs.subsession->sink->startPlaying(*(scs.subsession->readSource()),
                                       subsessionAfterPlaying, scs.subsession);
    // Also set a handler to be called if a RTCP "BYE" arrives for this
    // subsession:
    if (scs.subsession->rtcpInstance() != NULL) {
      scs.subsession->rtcpInstance()->setByeWithReasonHandler(
          subsessionByeHandler, scs.subsession);
    }
  } while (0);
  delete[] resultString;

  // Set up the next subsession, if any:
  setupNextSubsession(rtspClient);
}
461 
// Handles the response to the "PLAY" command: on success optionally schedules
// a timer for the stream's expected duration; on failure shuts the stream
// down.
void continueAfterPLAY(RTSPClient* rtspClient, int resultCode,
                       char* resultString) {
  Boolean success = False;

  do {
    UsageEnvironment& env = rtspClient->envir();                 // alias
    StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs;  // alias

    if (resultCode != 0) {
      env << *rtspClient << "Failed to start playing session: " << resultString
          << "\n";
      break;
    }

    // Set a timer to be handled at the end of the stream's expected duration
    // (if the stream does not already signal its end using a RTCP "BYE"). This
    // is optional. If, instead, you want to keep the stream active - e.g., so
    // you can later 'seek' back within it and do another RTSP "PLAY" - then you
    // can omit this code. (Alternatively, if you don't want to receive the
    // entire stream, you could set this timer for some shorter value.)
    if (scs.duration > 0) {
      unsigned const delaySlop =
          2; // number of seconds extra to delay, after the stream's expected
             // duration. (This is optional.)
      scs.duration += delaySlop;
      unsigned uSecsToDelay = (unsigned)(scs.duration * 1000000);
      scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(
          uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient);
    }

    env << *rtspClient << "Started playing session";
    if (scs.duration > 0) {
      env << " (for up to " << scs.duration << " seconds)";
    }
    env << "...\n";

    success = True;
  } while (0);
  delete[] resultString;

  if (!success) {
    // An unrecoverable error occurred with this stream.
    shutdownStream(rtspClient);
  }
}
507 
508 // Implementation of the other event handlers:
509 
510 void subsessionAfterPlaying(void* clientData) {
511  MediaSubsession* subsession = (MediaSubsession*)clientData;
512  RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr);
513 
514  // Begin by closing this subsession's stream:
515  Medium::close(subsession->sink);
516  subsession->sink = NULL;
517 
518  // Next, check whether *all* subsessions' streams have now been closed:
519  MediaSession& session = subsession->parentSession();
520  MediaSubsessionIterator iter(session);
521  while ((subsession = iter.next()) != NULL) {
522  if (subsession->sink != NULL) return; // this subsession is still active
523  }
524 
525  // All subsessions' streams have now been closed, so shutdown the client:
526  shutdownStream(rtspClient);
527 }
528 
// Called when a RTCP "BYE" is received for a subsession: logs it (taking
// ownership of the optional reason string) and then treats it like the end
// of the subsession's stream.
void subsessionByeHandler(void* clientData, char const* reason) {
  MediaSubsession* subsession = (MediaSubsession*)clientData;
  RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr;
  UsageEnvironment& env = rtspClient->envir();  // alias

  env << *rtspClient << "Received RTCP \"BYE\"";
  if (reason != NULL) {
    env << " (reason:\"" << reason << "\")";
    // The reason string is handed to us to free.
    delete[] (char*)reason;
  }
  env << " on \"" << *subsession << "\" subsession\n";

  // Now act as if the subsession had closed:
  subsessionAfterPlaying(subsession);
}
544 
545 void streamTimerHandler(void* clientData) {
546  OurRTSPClient* rtspClient = (OurRTSPClient*)clientData;
547  StreamClientState& scs = rtspClient->scs; // alias
548 
549  scs.streamTimerTask = NULL;
550 
551  // Shut down the stream:
552  shutdownStream(rtspClient);
553 }
554 
// Shuts down and closes a stream: closes every still-active subsession sink,
// sends a "TEARDOWN" if anything was active, and closes the client (which
// also reclaims its StreamClientState). When the last client is gone, signals
// the event loop to stop via rtspEventLoopWatchVariable.
// NOTE: exitCode is unused here; kept for signature compatibility with the
// original LIVE555 demo (which called exit(exitCode)).
void shutdownStream(RTSPClient* rtspClient, int exitCode) {
  UsageEnvironment& env = rtspClient->envir();                 // alias
  StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs;  // alias

  // First, check whether any subsessions have still to be closed:
  if (scs.session != NULL) {
    Boolean someSubsessionsWereActive = False;
    MediaSubsessionIterator iter(*scs.session);
    MediaSubsession* subsession;

    while ((subsession = iter.next()) != NULL) {
      if (subsession->sink != NULL) {
        Medium::close(subsession->sink);
        subsession->sink = NULL;

        if (subsession->rtcpInstance() != NULL) {
          subsession->rtcpInstance()->setByeHandler(
              NULL, NULL); // in case the server sends a RTCP "BYE" while
                           // handling "TEARDOWN"
        }

        someSubsessionsWereActive = True;
      }
    }

    if (someSubsessionsWereActive) {
      // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the
      // stream. Don't bother handling the response to the "TEARDOWN".
      rtspClient->sendTeardownCommand(*scs.session, NULL);
    }
  }

  env << *rtspClient << "Closing the stream.\n";
  Medium::close(rtspClient);
  // Note that this will also cause this stream's "StreamClientState" structure
  // to get reclaimed.

  if (--rtspClientCount == 0) {
    // The final stream has ended; instead of exiting the application (as the
    // original demo did), leave the LIVE555 event loop so that the caller's
    // code continues running.
    // exit(exitCode);
    rtspEventLoopWatchVariable = 1;
    return;
  }
}
603 
604 // Implementation of "OurRTSPClient":
605 
606 OurRTSPClient* OurRTSPClient::createNew(UsageEnvironment& env,
607  char const* rtspURL, int verbosityLevel,
608  char const* applicationName,
609  portNumBits tunnelOverHTTPPortNum) {
610  return new OurRTSPClient(env, rtspURL, verbosityLevel, applicationName,
611  tunnelOverHTTPPortNum);
612 }
613 
// Forwards all arguments to the RTSPClient base class.
// NOTE(review): the trailing -1 is RTSPClient's extra constructor argument
// (socketNumToServer per RTSPClient.hh) — confirm against the live555 version
// in use.
OurRTSPClient::OurRTSPClient(UsageEnvironment& env, char const* rtspURL,
                             int verbosityLevel, char const* applicationName,
                             portNumBits tunnelOverHTTPPortNum)
    : RTSPClient(env, rtspURL, verbosityLevel, applicationName,
                 tunnelOverHTTPPortNum, -1) {}

OurRTSPClient::~OurRTSPClient() {}
621 
622 // Implementation of "StreamClientState":
623 
// Initializes all per-stream state to "empty"/unset.
StreamClientState::StreamClientState()
    : iter(NULL),
      session(NULL),
      subsession(NULL),
      streamTimerTask(NULL),
      duration(0.0) {}
630 
// Releases the subsession iterator and, if a session exists, unschedules the
// pending stream timer and closes the session.
StreamClientState::~StreamClientState() {
  delete iter;
  if (session != NULL) {
    // We also need to delete "session", and unschedule "streamTimerTask" (if
    // set)
    UsageEnvironment& env = session->envir(); // alias

    env.taskScheduler().unscheduleDelayedTask(streamTimerTask);
    Medium::close(session);
  }
}
642 
643 // Implementation of "OurSink":
644 
645 OurSink* OurSink::createNew(UsageEnvironment& env,
646  MediaSubsession& subsession,
647  char const* streamId) {
648  return new OurSink(env, subsession, streamId);
649 }
650 
// Copies the stream id (for debug output) and allocates the receive buffer,
// sized by the global rtspSinkReceiveBufferSize (configured through
// AudioClientRTSP::setBufferSize()).
OurSink::OurSink(UsageEnvironment& env, MediaSubsession& subsession,
                 char const* streamId)
    : MediaSink(env), fSubsession(subsession) {
  fStreamId = strDup(streamId);
  fReceiveBuffer = new u_int8_t[rtspSinkReceiveBufferSize];
}
657 
OurSink::~OurSink() {
  delete[] fReceiveBuffer;
  delete[] fStreamId;  // strDup() allocates with new[]
}
662 
663 void OurSink::afterGettingFrame(void* clientData, unsigned frameSize,
664  unsigned numTruncatedBytes,
665  struct timeval presentationTime,
666  unsigned durationInMicroseconds) {
667  OurSink* sink = (OurSink*)clientData;
668  sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime,
669  durationInMicroseconds);
670 }
671 
672 void OurSink::afterGettingFrame(unsigned frameSize,
673  unsigned numTruncatedBytes,
674  struct timeval presentationTime,
675  unsigned /*durationInMicroseconds*/) {
676  // We've just received a frame of data. (Optionally) print out information
677  // about it:
678 #ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
679  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
680  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName()
681  << ":\tReceived " << frameSize << " bytes";
682  if (numTruncatedBytes > 0)
683  envir() << " (with " << numTruncatedBytes << " bytes truncated)";
684  char uSecsStr[6 + 1]; // used to output the 'microseconds' part of the
685  // presentation time
686  snprintf(uSecsStr,7 , "%06u", (unsigned)presentationTime.tv_usec);
687  envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "."
688  << uSecsStr;
689  if (fSubsession.rtpSource() != NULL &&
690  !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
691  envir() << "!"; // mark the debugging output to indicate that this
692  // presentation time is not RTCP-synchronized
693  }
694 #ifdef DEBUG_PRINT_NPT
695  envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
696 #endif
697  envir() << "\n";
698 #endif
699 
700  // Decode the data
701  if (rtspOutput) {
702  size_t writtenSize = rtspOutput->write(fReceiveBuffer, frameSize);
703  assert(writtenSize == frameSize);
704  }
705 
706  // Then continue, to request the next frame of data:
707  continuePlaying();
708 }
709 
// Requests the next frame from the upstream source; afterGettingFrame() is
// invoked when it arrives. Returns False only when there is no source.
Boolean OurSink::continuePlaying() {
  if (fSource == NULL) return False; // sanity check (should not happen)

  // Request the next frame of data from our input source. "afterGettingFrame()"
  // will get called later, when it arrives:
  fSource->getNextFrame(fReceiveBuffer, rtspSinkReceiveBufferSize,
                        afterGettingFrame, this, onSourceClosure, this);
  return True;
}
719 
720 } // namespace audiotools_rtsp
A simple RTSPClient using https://github.com/pschatzmann/arduino-live555.
Definition: AudioClientRTSP.h:70
bool login()
login to wifi: optional convenience method. You can also just start WiFi the normal way
Definition: AudioClientRTSP.h:153
bool begin(const char *url, Print &out)
Starts the processing.
Definition: AudioClientRTSP.h:101
void setLogin(const char *ssid, const char *password)
login to wifi: optional convenience method. You can also just start WiFi the normal way
Definition: AudioClientRTSP.h:95
void loop()
to be called in Arduino loop when blocking = false
Definition: AudioClientRTSP.h:127
Definition: NoArduino.h:58
Definition: AudioClientRTSP.h:238
Definition: AudioClientRTSP.h:263
Definition: AudioClientRTSP.h:217
Generic Implementation of sound input and output for desktop environments using portaudio.
Definition: AudioConfig.h:868
AudioTools internal: rtsp.
Definition: AudioClientRTSP.h:48