LCOV - code coverage report
Current view: content/media/webm - nsWebMReader.cpp (source / functions)
Test: app.info
Date: 2012-06-02

              Found     Hit   Coverage
Lines:          406       0      0.0 %
Functions:       20       0      0.0 %

       1                 : /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
       2                 : /* vim:set ts=2 sw=2 sts=2 et cindent: */
       3                 : /* ***** BEGIN LICENSE BLOCK *****
       4                 :  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
       5                 :  *
       6                 :  * The contents of this file are subject to the Mozilla Public License Version
       7                 :  * 1.1 (the "License"); you may not use this file except in compliance with
       8                 :  * the License. You may obtain a copy of the License at
       9                 :  * http://www.mozilla.org/MPL/
      10                 :  *
      11                 :  * Software distributed under the License is distributed on an "AS IS" basis,
      12                 :  * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
      13                 :  * for the specific language governing rights and limitations under the
      14                 :  * License.
      15                 :  *
      16                 :  * The Original Code is Mozilla code.
      17                 :  *
      18                 :  * The Initial Developer of the Original Code is the Mozilla Corporation.
      19                 :  * Portions created by the Initial Developer are Copyright (C) 2007
      20                 :  * the Initial Developer. All Rights Reserved.
      21                 :  *
      22                 :  * Contributor(s):
      23                 :  *  Chris Double <chris.double@double.co.nz>
      24                 :  *  Chris Pearce <chris@pearce.org.nz>
      25                 :  *
      26                 :  * Alternatively, the contents of this file may be used under the terms of
      27                 :  * either the GNU General Public License Version 2 or later (the "GPL"), or
      28                 :  * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
      29                 :  * in which case the provisions of the GPL or the LGPL are applicable instead
      30                 :  * of those above. If you wish to allow use of your version of this file only
      31                 :  * under the terms of either the GPL or the LGPL, and not to allow others to
      32                 :  * use your version of this file under the terms of the MPL, indicate your
      33                 :  * decision by deleting the provisions above and replace them with the notice
      34                 :  * and other provisions required by the GPL or the LGPL. If you do not delete
      35                 :  * the provisions above, a recipient may use your version of this file under
      36                 :  * the terms of any one of the MPL, the GPL or the LGPL.
      37                 :  *
      38                 :  * ***** END LICENSE BLOCK ***** */
      39                 : #include "nsError.h"
      40                 : #include "nsBuiltinDecoderStateMachine.h"
      41                 : #include "nsBuiltinDecoder.h"
      42                 : #include "MediaResource.h"
      43                 : #include "nsWebMReader.h"
      44                 : #include "nsWebMBufferedParser.h"
      45                 : #include "VideoUtils.h"
      46                 : #include "nsTimeRanges.h"
      47                 : #include "mozilla/Preferences.h"
      48                 : 
      49                 : #define VPX_DONT_DEFINE_STDINT_TYPES
      50                 : #include "vpx/vp8dx.h"
      51                 : #include "vpx/vpx_decoder.h"
      52                 : 
      53                 : using namespace mozilla;
      54                 : using namespace mozilla::layers;
      55                 : 
      56                 : // Un-comment to enable logging of seek bisections.
      57                 : //#define SEEK_LOGGING
      58                 : 
      59                 : #ifdef PR_LOGGING
      60                 : extern PRLogModuleInfo* gBuiltinDecoderLog;
      61                 : #define LOG(type, msg) PR_LOG(gBuiltinDecoderLog, type, msg)
      62                 : #ifdef SEEK_LOGGING
      63                 : #define SEEK_LOG(type, msg) PR_LOG(gBuiltinDecoderLog, type, msg)
      64                 : #else
      65                 : #define SEEK_LOG(type, msg)
      66                 : #endif
      67                 : #else
      68                 : #define LOG(type, msg)
      69                 : #define SEEK_LOG(type, msg)
      70                 : #endif
      71                 : 
      72                 : static const unsigned NS_PER_USEC = 1000;
      73                 : static const double NS_PER_S = 1e9;
      74                 : 
      75                 : // If a seek request is within SEEK_DECODE_MARGIN microseconds of the
      76                 : // current time, decode ahead from the current frame rather than performing
      77                 : // a full seek.
      78                 : static const int SEEK_DECODE_MARGIN = 250000;
      79                 : 
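// --- Illustrative sketch (not part of nsWebMReader.cpp) ---------------------
// How a caller might use SEEK_DECODE_MARGIN to decide between decoding ahead
// from the current position and performing a full seek. The helper name and
// both parameters are hypothetical; only the constant comes from this file.
static bool CanDecodeAheadInsteadOfSeeking(PRInt64 aTargetUsecs,
                                           PRInt64 aCurrentTimeUsecs)
{
  return aTargetUsecs >= aCurrentTimeUsecs &&
         aTargetUsecs - aCurrentTimeUsecs < SEEK_DECODE_MARGIN;
}
// ----------------------------------------------------------------------------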
      80                 : template <>
      81                 : class nsAutoRefTraits<NesteggPacketHolder> : public nsPointerRefTraits<NesteggPacketHolder>
      82               0 : {
      83                 : public:
      84               0 :   static void Release(NesteggPacketHolder* aHolder) { delete aHolder; }
      85                 : };
      86                 : 
      87                 : // Functions for reading and seeking using MediaResource required for
      88                 : // nestegg_io. The 'user data' passed to these functions is the
      89                 : // decoder from which the media resource is obtained.
      90               0 : static int webm_read(void *aBuffer, size_t aLength, void *aUserData)
      91                 : {
      92               0 :   NS_ASSERTION(aUserData, "aUserData must point to a valid nsBuiltinDecoder");
      93               0 :   nsBuiltinDecoder* decoder = reinterpret_cast<nsBuiltinDecoder*>(aUserData);
      94               0 :   MediaResource* resource = decoder->GetResource();
      95               0 :   NS_ASSERTION(resource, "Decoder has no media resource");
      96                 : 
      97               0 :   nsresult rv = NS_OK;
      98               0 :   bool eof = false;
      99                 : 
     100               0 :   char *p = static_cast<char *>(aBuffer);
     101               0 :   while (NS_SUCCEEDED(rv) && aLength > 0) {
     102               0 :     PRUint32 bytes = 0;
     103               0 :     rv = resource->Read(p, aLength, &bytes);
     104               0 :     if (bytes == 0) {
     105               0 :       eof = true;
     106               0 :       break;
     107                 :     }
     108               0 :     decoder->NotifyBytesConsumed(bytes);
     109               0 :     aLength -= bytes;
     110               0 :     p += bytes;
     111                 :   }
     112                 : 
      113               0 :   return NS_FAILED(rv) ? -1 : eof ? 0 : 1;  // nestegg read callback: 1 = success, 0 = end of stream, -1 = error.
     114                 : }
     115                 : 
     116               0 : static int webm_seek(int64_t aOffset, int aWhence, void *aUserData)
     117                 : {
     118               0 :   NS_ASSERTION(aUserData, "aUserData must point to a valid nsBuiltinDecoder");
     119               0 :   nsBuiltinDecoder* decoder = reinterpret_cast<nsBuiltinDecoder*>(aUserData);
     120               0 :   MediaResource* resource = decoder->GetResource();
     121               0 :   NS_ASSERTION(resource, "Decoder has no media resource");
     122               0 :   nsresult rv = resource->Seek(aWhence, aOffset);
     123               0 :   return NS_SUCCEEDED(rv) ? 0 : -1;
     124                 : }
     125                 : 
     126               0 : static int64_t webm_tell(void *aUserData)
     127                 : {
     128               0 :   NS_ASSERTION(aUserData, "aUserData must point to a valid nsBuiltinDecoder");
     129               0 :   nsBuiltinDecoder* decoder = reinterpret_cast<nsBuiltinDecoder*>(aUserData);
     130               0 :   MediaResource* resource = decoder->GetResource();
     131               0 :   NS_ASSERTION(resource, "Decoder has no media resource");
     132               0 :   return resource->Tell();
     133                 : }
     134                 : 
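// --- Usage sketch (not part of nsWebMReader.cpp) ----------------------------
// The three callbacks above are handed to libnestegg through a nestegg_io
// descriptor, with the owning decoder as the user data pointer; this mirrors
// what nsWebMReader::ReadMetadata does below. The helper name and aDecoder
// parameter are hypothetical.
static nestegg* InitNesteggContext(nsBuiltinDecoder* aDecoder)
{
  nestegg_io io;
  io.read = webm_read;
  io.seek = webm_seek;
  io.tell = webm_tell;
  io.userdata = static_cast<void*>(aDecoder);
  nestegg* context = nsnull;
  // nestegg_init returns -1 on failure.
  if (nestegg_init(&context, io, NULL) == -1) {
    return nsnull;
  }
  return context;
}
// ----------------------------------------------------------------------------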
     135               0 : nsWebMReader::nsWebMReader(nsBuiltinDecoder* aDecoder)
     136                 :   : nsBuiltinDecoderReader(aDecoder),
     137                 :   mContext(nsnull),
     138                 :   mPacketCount(0),
     139                 :   mChannels(0),
     140                 :   mVideoTrack(0),
     141                 :   mAudioTrack(0),
     142                 :   mAudioStartUsec(-1),
     143                 :   mAudioFrames(0),
     144                 :   mForceStereoMode(0),
     145                 :   mHasVideo(false),
     146                 :   mHasAudio(false),
     147               0 :   mStereoModeForced(false)
     148                 : {
     149               0 :   MOZ_COUNT_CTOR(nsWebMReader);
     150                 : 
     151                 :   mStereoModeForced =
     152               0 :     NS_SUCCEEDED(Preferences::GetInt(
     153                 :           "media.webm.force_stereo_mode",
     154               0 :           &mForceStereoMode));
     155               0 : }
     156                 : 
     157               0 : nsWebMReader::~nsWebMReader()
     158                 : {
     159               0 :   Cleanup();
     160                 : 
     161               0 :   mVideoPackets.Reset();
     162               0 :   mAudioPackets.Reset();
     163                 : 
     164               0 :   vpx_codec_destroy(&mVP8);
     165                 : 
     166               0 :   vorbis_block_clear(&mVorbisBlock);
     167               0 :   vorbis_dsp_clear(&mVorbisDsp);
     168               0 :   vorbis_info_clear(&mVorbisInfo);
     169               0 :   vorbis_comment_clear(&mVorbisComment);
     170                 : 
     171               0 :   MOZ_COUNT_DTOR(nsWebMReader);
     172               0 : }
     173                 : 
     174               0 : nsresult nsWebMReader::Init(nsBuiltinDecoderReader* aCloneDonor)
     175                 : {
     176               0 :   if (vpx_codec_dec_init(&mVP8, vpx_codec_vp8_dx(), NULL, 0)) {
     177               0 :     return NS_ERROR_FAILURE;
     178                 :   }
     179                 : 
     180               0 :   vorbis_info_init(&mVorbisInfo);
     181               0 :   vorbis_comment_init(&mVorbisComment);
     182               0 :   memset(&mVorbisDsp, 0, sizeof(vorbis_dsp_state));
     183               0 :   memset(&mVorbisBlock, 0, sizeof(vorbis_block));
     184                 : 
     185               0 :   if (aCloneDonor) {
     186               0 :     mBufferedState = static_cast<nsWebMReader*>(aCloneDonor)->mBufferedState;
     187                 :   } else {
     188               0 :     mBufferedState = new nsWebMBufferedState;
     189                 :   }
     190                 : 
     191               0 :   return NS_OK;
     192                 : }
     193                 : 
     194               0 : nsresult nsWebMReader::ResetDecode()
     195                 : {
     196               0 :   mAudioFrames = 0;
     197               0 :   mAudioStartUsec = -1;
     198               0 :   nsresult res = NS_OK;
     199               0 :   if (NS_FAILED(nsBuiltinDecoderReader::ResetDecode())) {
     200               0 :     res = NS_ERROR_FAILURE;
     201                 :   }
     202                 : 
      203                 :   // Ignore failed results from vorbis_synthesis_restart. They
      204                 :   // aren't fatal; the call simply fails when ResetDecode is called
      205                 :   // before any Vorbis data has been read.
     206               0 :   vorbis_synthesis_restart(&mVorbisDsp);
     207                 : 
     208               0 :   mVideoPackets.Reset();
     209               0 :   mAudioPackets.Reset();
     210                 : 
     211               0 :   return res;
     212                 : }
     213                 : 
     214               0 : void nsWebMReader::Cleanup()
     215                 : {
     216               0 :   if (mContext) {
     217               0 :     nestegg_destroy(mContext);
     218               0 :     mContext = nsnull;
     219                 :   }
     220               0 : }
     221                 : 
     222               0 : nsresult nsWebMReader::ReadMetadata(nsVideoInfo* aInfo)
     223                 : {
     224               0 :   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
     225                 : 
     226                 :   nestegg_io io;
     227               0 :   io.read = webm_read;
     228               0 :   io.seek = webm_seek;
     229               0 :   io.tell = webm_tell;
     230               0 :   io.userdata = static_cast<nsBuiltinDecoder*>(mDecoder);
     231               0 :   int r = nestegg_init(&mContext, io, NULL);
     232               0 :   if (r == -1) {
     233               0 :     return NS_ERROR_FAILURE;
     234                 :   }
     235                 : 
     236               0 :   uint64_t duration = 0;
     237               0 :   r = nestegg_duration(mContext, &duration);
     238               0 :   if (r == 0) {
     239               0 :     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     240               0 :     mDecoder->GetStateMachine()->SetDuration(duration / NS_PER_USEC);
     241                 :   }
     242                 : 
     243               0 :   unsigned int ntracks = 0;
     244               0 :   r = nestegg_track_count(mContext, &ntracks);
     245               0 :   if (r == -1) {
     246               0 :     Cleanup();
     247               0 :     return NS_ERROR_FAILURE;
     248                 :   }
     249                 : 
     250               0 :   mInfo.mHasAudio = false;
     251               0 :   mInfo.mHasVideo = false;
     252               0 :   for (PRUint32 track = 0; track < ntracks; ++track) {
     253               0 :     int id = nestegg_track_codec_id(mContext, track);
     254               0 :     if (id == -1) {
     255               0 :       Cleanup();
     256               0 :       return NS_ERROR_FAILURE;
     257                 :     }
     258               0 :     int type = nestegg_track_type(mContext, track);
     259               0 :     if (!mHasVideo && type == NESTEGG_TRACK_VIDEO) {
     260                 :       nestegg_video_params params;
     261               0 :       r = nestegg_track_video_params(mContext, track, &params);
     262               0 :       if (r == -1) {
     263               0 :         Cleanup();
     264               0 :         return NS_ERROR_FAILURE;
     265                 :       }
     266                 : 
     267                 :       // Picture region, taking into account cropping, before scaling
     268                 :       // to the display size.
     269                 :       nsIntRect pictureRect(params.crop_left,
     270                 :                             params.crop_top,
     271                 :                             params.width - (params.crop_right + params.crop_left),
     272               0 :                             params.height - (params.crop_bottom + params.crop_top));
     273                 : 
     274                 :       // If the cropping data appears invalid then use the frame data
     275               0 :       if (pictureRect.width <= 0 ||
     276                 :           pictureRect.height <= 0 ||
     277                 :           pictureRect.x < 0 ||
     278                 :           pictureRect.y < 0)
     279                 :       {
     280               0 :         pictureRect.x = 0;
     281               0 :         pictureRect.y = 0;
     282               0 :         pictureRect.width = params.width;
     283               0 :         pictureRect.height = params.height;
     284                 :       }
     285                 : 
     286                 :       // Validate the container-reported frame and pictureRect sizes. This ensures
     287                 :       // that our video frame creation code doesn't overflow.
     288               0 :       nsIntSize displaySize(params.display_width, params.display_height);
     289               0 :       nsIntSize frameSize(params.width, params.height);
     290               0 :       if (!nsVideoInfo::ValidateVideoRegion(frameSize, pictureRect, displaySize)) {
     291                 :         // Video track's frame sizes will overflow. Ignore the video track.
     292               0 :         continue;
     293                 :       }
     294                 : 
     295               0 :       mVideoTrack = track;
     296               0 :       mHasVideo = true;
     297               0 :       mInfo.mHasVideo = true;
     298                 : 
     299               0 :       mInfo.mDisplay = displaySize;
     300               0 :       mPicture = pictureRect;
     301               0 :       mInitialFrame = frameSize;
     302                 : 
     303               0 :       switch (params.stereo_mode) {
     304                 :       case NESTEGG_VIDEO_MONO:
     305               0 :         mInfo.mStereoMode = STEREO_MODE_MONO;
     306               0 :         break;
     307                 :       case NESTEGG_VIDEO_STEREO_LEFT_RIGHT:
     308               0 :         mInfo.mStereoMode = STEREO_MODE_LEFT_RIGHT;
     309               0 :         break;
     310                 :       case NESTEGG_VIDEO_STEREO_BOTTOM_TOP:
     311               0 :         mInfo.mStereoMode = STEREO_MODE_BOTTOM_TOP;
     312               0 :         break;
     313                 :       case NESTEGG_VIDEO_STEREO_TOP_BOTTOM:
     314               0 :         mInfo.mStereoMode = STEREO_MODE_TOP_BOTTOM;
     315               0 :         break;
     316                 :       case NESTEGG_VIDEO_STEREO_RIGHT_LEFT:
     317               0 :         mInfo.mStereoMode = STEREO_MODE_RIGHT_LEFT;
     318               0 :         break;
     319                 :       }
     320                 : 
      321                 :       // Override the detected stereo mode only when the pref is explicitly set.
     322               0 :       if (mStereoModeForced) {
     323               0 :         switch (mForceStereoMode) {
     324                 :         case 1:
     325               0 :           mInfo.mStereoMode = STEREO_MODE_LEFT_RIGHT;
     326               0 :           break;
     327                 :         case 2:
     328               0 :           mInfo.mStereoMode = STEREO_MODE_RIGHT_LEFT;
     329               0 :           break;
     330                 :         case 3:
     331               0 :           mInfo.mStereoMode = STEREO_MODE_TOP_BOTTOM;
     332               0 :           break;
     333                 :         case 4:
     334               0 :           mInfo.mStereoMode = STEREO_MODE_BOTTOM_TOP;
     335               0 :           break;
     336                 :         default:
     337               0 :           mInfo.mStereoMode = STEREO_MODE_MONO;
     338                 :         }
     339               0 :       }
     340                 :     }
     341               0 :     else if (!mHasAudio && type == NESTEGG_TRACK_AUDIO) {
     342                 :       nestegg_audio_params params;
     343               0 :       r = nestegg_track_audio_params(mContext, track, &params);
     344               0 :       if (r == -1) {
     345               0 :         Cleanup();
     346               0 :         return NS_ERROR_FAILURE;
     347                 :       }
     348                 : 
     349               0 :       mAudioTrack = track;
     350               0 :       mHasAudio = true;
     351               0 :       mInfo.mHasAudio = true;
     352                 : 
      353                 :       // Get the three Vorbis header packets (identification, comment and setup).
     354               0 :       unsigned int nheaders = 0;
     355               0 :       r = nestegg_track_codec_data_count(mContext, track, &nheaders);
     356               0 :       if (r == -1 || nheaders != 3) {
     357               0 :         Cleanup();
     358               0 :         return NS_ERROR_FAILURE;
     359                 :       }
     360                 : 
     361               0 :       for (PRUint32 header = 0; header < nheaders; ++header) {
     362               0 :         unsigned char* data = 0;
     363               0 :         size_t length = 0;
     364                 : 
     365               0 :         r = nestegg_track_codec_data(mContext, track, header, &data, &length);
     366               0 :         if (r == -1) {
     367               0 :           Cleanup();
     368               0 :           return NS_ERROR_FAILURE;
     369                 :         }
     370                 : 
     371               0 :         ogg_packet opacket = InitOggPacket(data, length, header == 0, false, 0);
     372                 : 
     373                 :         r = vorbis_synthesis_headerin(&mVorbisInfo,
     374                 :                                       &mVorbisComment,
     375               0 :                                       &opacket);
     376               0 :         if (r != 0) {
     377               0 :           Cleanup();
     378               0 :           return NS_ERROR_FAILURE;
     379                 :         }
     380                 :       }
     381                 : 
     382               0 :       r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
     383               0 :       if (r != 0) {
     384               0 :         Cleanup();
     385               0 :         return NS_ERROR_FAILURE;
     386                 :       }
     387                 : 
     388               0 :       r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
     389               0 :       if (r != 0) {
     390               0 :         Cleanup();
     391               0 :         return NS_ERROR_FAILURE;
     392                 :       }
     393                 : 
     394               0 :       mInfo.mAudioRate = mVorbisDsp.vi->rate;
     395               0 :       mInfo.mAudioChannels = mVorbisDsp.vi->channels;
     396               0 :       mChannels = mInfo.mAudioChannels;
     397                 :     }
     398                 :   }
     399                 : 
     400               0 :   *aInfo = mInfo;
     401                 : 
     402               0 :   return NS_OK;
     403                 : }
     404                 : 
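// --- Illustrative note (not part of nsWebMReader.cpp) -----------------------
// The "media.webm.force_stereo_mode" pref read in the constructor drives the
// override switch in ReadMetadata above: 1 = left/right, 2 = right/left,
// 3 = top/bottom, 4 = bottom/top, any other value = mono. A debugging or test
// harness might flip it through the Preferences API; the helper name and the
// chosen value are hypothetical.
static void ForceWebMStereoModeTopBottom()
{
  mozilla::Preferences::SetInt("media.webm.force_stereo_mode", 3);
}
// ----------------------------------------------------------------------------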
     405               0 : ogg_packet nsWebMReader::InitOggPacket(unsigned char* aData,
     406                 :                                        size_t aLength,
     407                 :                                        bool aBOS,
     408                 :                                        bool aEOS,
     409                 :                                        PRInt64 aGranulepos)
     410                 : {
     411                 :   ogg_packet packet;
     412               0 :   packet.packet = aData;
     413               0 :   packet.bytes = aLength;
     414               0 :   packet.b_o_s = aBOS;
     415               0 :   packet.e_o_s = aEOS;
     416               0 :   packet.granulepos = aGranulepos;
     417               0 :   packet.packetno = mPacketCount++;
     418                 :   return packet;
     419                 : }
     420                 :  
     421               0 : bool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset)
     422                 : {
     423               0 :   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
     424                 : 
     425               0 :   int r = 0;
     426               0 :   unsigned int count = 0;
     427               0 :   r = nestegg_packet_count(aPacket, &count);
     428               0 :   if (r == -1) {
     429               0 :     return false;
     430                 :   }
     431                 : 
     432               0 :   uint64_t tstamp = 0;
     433               0 :   r = nestegg_packet_tstamp(aPacket, &tstamp);
     434               0 :   if (r == -1) {
     435               0 :     return false;
     436                 :   }
     437                 : 
     438               0 :   const PRUint32 rate = mVorbisDsp.vi->rate;
     439               0 :   PRUint64 tstamp_usecs = tstamp / NS_PER_USEC;
     440               0 :   if (mAudioStartUsec == -1) {
     441                 :     // This is the first audio chunk. Assume the start time of our decode
     442                 :     // is the start of this chunk.
     443               0 :     mAudioStartUsec = tstamp_usecs;
     444                 :   }
     445                 :   // If there's a gap between the start of this audio chunk and the end of
     446                 :   // the previous audio chunk, we need to increment the packet count so that
     447                 :   // the vorbis decode doesn't use data from before the gap to help decode
     448                 :   // from after the gap.
     449               0 :   CheckedInt64 tstamp_frames = UsecsToFrames(tstamp_usecs, rate);
     450               0 :   CheckedInt64 decoded_frames = UsecsToFrames(mAudioStartUsec, rate);
     451               0 :   if (!tstamp_frames.valid() || !decoded_frames.valid()) {
     452               0 :     NS_WARNING("Int overflow converting WebM times to frames");
     453               0 :     return false;
     454                 :   }
     455               0 :   decoded_frames += mAudioFrames;
     456               0 :   if (!decoded_frames.valid()) {
     457               0 :     NS_WARNING("Int overflow adding decoded_frames");
     458               0 :     return false;
     459                 :   }
     460               0 :   if (tstamp_frames.value() > decoded_frames.value()) {
     461                 : #ifdef DEBUG
     462               0 :     CheckedInt64 usecs = FramesToUsecs(tstamp_frames.value() - decoded_frames.value(), rate);
     463               0 :     LOG(PR_LOG_DEBUG, ("WebMReader detected gap of %lld, %lld frames, in audio stream\n",
     464                 :       usecs.valid() ? usecs.value(): -1,
     465                 :       tstamp_frames.value() - decoded_frames.value()));
     466                 : #endif
     467               0 :     mPacketCount++;
     468               0 :     mAudioStartUsec = tstamp_usecs;
     469               0 :     mAudioFrames = 0;
     470                 :   }
     471                 : 
     472               0 :   PRInt32 total_frames = 0;
     473               0 :   for (PRUint32 i = 0; i < count; ++i) {
     474                 :     unsigned char* data;
     475                 :     size_t length;
     476               0 :     r = nestegg_packet_data(aPacket, i, &data, &length);
     477               0 :     if (r == -1) {
     478               0 :       return false;
     479                 :     }
     480                 : 
     481               0 :     ogg_packet opacket = InitOggPacket(data, length, false, false, -1);
     482                 : 
     483               0 :     if (vorbis_synthesis(&mVorbisBlock, &opacket) != 0) {
     484               0 :       return false;
     485                 :     }
     486                 : 
     487               0 :     if (vorbis_synthesis_blockin(&mVorbisDsp,
     488               0 :                                  &mVorbisBlock) != 0) {
     489               0 :       return false;
     490                 :     }
     491                 : 
     492               0 :     VorbisPCMValue** pcm = 0;
     493               0 :     PRInt32 frames = 0;
     494               0 :     while ((frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm)) > 0) {
     495               0 :       nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames * mChannels]);
     496               0 :       for (PRUint32 j = 0; j < mChannels; ++j) {
     497               0 :         VorbisPCMValue* channel = pcm[j];
     498               0 :         for (PRUint32 i = 0; i < PRUint32(frames); ++i) {
      499               0 :           buffer[i*mChannels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]); // interleave: frame i, channel j
     500                 :         }
     501                 :       }
     502                 : 
     503               0 :       CheckedInt64 duration = FramesToUsecs(frames, rate);
     504               0 :       if (!duration.valid()) {
     505               0 :         NS_WARNING("Int overflow converting WebM audio duration");
     506               0 :         return false;
     507                 :       }
     508               0 :       CheckedInt64 total_duration = FramesToUsecs(total_frames, rate);
     509               0 :       if (!total_duration.valid()) {
     510               0 :         NS_WARNING("Int overflow converting WebM audio total_duration");
     511               0 :         return false;
     512                 :       }
     513                 :       
     514               0 :       CheckedInt64 time = total_duration + tstamp_usecs;
     515               0 :       if (!time.valid()) {
     516               0 :         NS_WARNING("Int overflow adding total_duration and tstamp_usecs");
      517                 :         // aPacket is owned by the caller's NesteggPacketHolder; don't free it here.
      518               0 :         return false;
      519                 :       }
     520                 : 
     521               0 :       total_frames += frames;
     522                 :       mAudioQueue.Push(new AudioData(aOffset,
     523                 :                                      time.value(),
     524                 :                                      duration.value(),
     525                 :                                      frames,
     526                 :                                      buffer.forget(),
     527               0 :                                      mChannels));
     528               0 :       mAudioFrames += frames;
     529               0 :       if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) {
     530               0 :         return false;
     531                 :       }
     532                 :     }
     533                 :   }
     534                 : 
     535               0 :   return true;
     536                 : }
     537                 : 
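// --- Worked example (illustrative values, not part of nsWebMReader.cpp) -----
// The gap check in DecodeAudioPacket above relies on the UsecsToFrames /
// FramesToUsecs helpers from VideoUtils.h, which compute
// frames = usecs * rate / 1,000,000 (and the inverse) as CheckedInt64 so that
// overflow can be detected. For example, at rate = 48000 Hz:
//
//   UsecsToFrames(20000, 48000) -> 20000 * 48000 / 1000000 = 960 frames
//
// so an audio chunk whose timestamp starts 20 ms after the end of the
// previously decoded audio shows up as a 960-frame gap: the packet counter is
// bumped and decoding restarts at the new timestamp.
// ----------------------------------------------------------------------------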
     538               0 : nsReturnRef<NesteggPacketHolder> nsWebMReader::NextPacket(TrackType aTrackType)
     539                 : {
     540                 :   // The packet queue that packets will be pushed on if they
     541                 :   // are not the type we are interested in.
     542                 :   PacketQueue& otherPackets = 
     543               0 :     aTrackType == VIDEO ? mAudioPackets : mVideoPackets;
     544                 : 
     545                 :   // The packet queue for the type that we are interested in.
     546                 :   PacketQueue &packets =
     547               0 :     aTrackType == VIDEO ? mVideoPackets : mAudioPackets;
     548                 : 
      549                 :   // Flag indicating that we need to play back this type of
      550                 :   // packet.
     551               0 :   bool hasType = aTrackType == VIDEO ? mHasVideo : mHasAudio;
     552                 : 
      553                 :   // Flag indicating that we need to play back the other type
      554                 :   // of track.
     555               0 :   bool hasOtherType = aTrackType == VIDEO ? mHasAudio : mHasVideo;
     556                 : 
     557                 :   // Track we are interested in
     558               0 :   PRUint32 ourTrack = aTrackType == VIDEO ? mVideoTrack : mAudioTrack;
     559                 : 
     560                 :   // Value of other track
     561               0 :   PRUint32 otherTrack = aTrackType == VIDEO ? mAudioTrack : mVideoTrack;
     562                 : 
     563               0 :   nsAutoRef<NesteggPacketHolder> holder;
     564                 : 
     565               0 :   if (packets.GetSize() > 0) {
     566               0 :     holder.own(packets.PopFront());
     567                 :   } else {
     568                 :     // Keep reading packets until we find a packet
     569                 :     // for the track we want.
     570               0 :     do {
     571                 :       nestegg_packet* packet;
     572               0 :       int r = nestegg_read_packet(mContext, &packet);
     573               0 :       if (r <= 0) {
     574               0 :         return nsReturnRef<NesteggPacketHolder>();
     575                 :       }
     576               0 :       PRInt64 offset = mDecoder->GetResource()->Tell();
     577               0 :       holder.own(new NesteggPacketHolder(packet, offset));
     578                 : 
     579               0 :       unsigned int track = 0;
     580               0 :       r = nestegg_packet_track(packet, &track);
     581               0 :       if (r == -1) {
     582               0 :         return nsReturnRef<NesteggPacketHolder>();
     583                 :       }
     584                 : 
     585               0 :       if (hasOtherType && otherTrack == track) {
     586                 :         // Save the packet for when we want these packets
     587               0 :         otherPackets.Push(holder.disown());
     588               0 :         continue;
     589                 :       }
     590                 : 
     591                 :       // The packet is for the track we want to play
     592               0 :       if (hasType && ourTrack == track) {
     593               0 :         break;
     594                 :       }
     595                 :     } while (true);
     596                 :   }
     597                 : 
     598               0 :   return holder.out();
     599                 : }
     600                 : 
     601               0 : bool nsWebMReader::DecodeAudioData()
     602                 : {
     603               0 :   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
     604                 : 
     605               0 :   nsAutoRef<NesteggPacketHolder> holder(NextPacket(AUDIO));
     606               0 :   if (!holder) {
     607               0 :     mAudioQueue.Finish();
     608               0 :     return false;
     609                 :   }
     610                 : 
     611               0 :   return DecodeAudioPacket(holder->mPacket, holder->mOffset);
     612                 : }
     613                 : 
     614               0 : bool nsWebMReader::DecodeVideoFrame(bool &aKeyframeSkip,
     615                 :                                       PRInt64 aTimeThreshold)
     616                 : {
     617               0 :   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
     618                 : 
     619                 :   // Record number of frames decoded and parsed. Automatically update the
     620                 :   // stats counters using the AutoNotifyDecoded stack-based class.
     621               0 :   PRUint32 parsed = 0, decoded = 0;
     622               0 :   nsMediaDecoder::AutoNotifyDecoded autoNotify(mDecoder, parsed, decoded);
     623                 : 
     624               0 :   nsAutoRef<NesteggPacketHolder> holder(NextPacket(VIDEO));
     625               0 :   if (!holder) {
     626               0 :     mVideoQueue.Finish();
     627               0 :     return false;
     628                 :   }
     629                 : 
     630               0 :   nestegg_packet* packet = holder->mPacket;
     631               0 :   unsigned int track = 0;
     632               0 :   int r = nestegg_packet_track(packet, &track);
     633               0 :   if (r == -1) {
     634               0 :     return false;
     635                 :   }
     636                 : 
     637               0 :   unsigned int count = 0;
     638               0 :   r = nestegg_packet_count(packet, &count);
     639               0 :   if (r == -1) {
     640               0 :     return false;
     641                 :   }
     642                 : 
     643               0 :   uint64_t tstamp = 0;
     644               0 :   r = nestegg_packet_tstamp(packet, &tstamp);
     645               0 :   if (r == -1) {
     646               0 :     return false;
     647                 :   }
     648                 : 
     649                 :   // The end time of this frame is the start time of the next frame.  Fetch
     650                 :   // the timestamp of the next packet for this track.  If we've reached the
     651                 :   // end of the resource, use the file's duration as the end time of this
     652                 :   // video frame.
     653               0 :   uint64_t next_tstamp = 0;
     654                 :   {
     655               0 :     nsAutoRef<NesteggPacketHolder> next_holder(NextPacket(VIDEO));
     656               0 :     if (next_holder) {
     657               0 :       r = nestegg_packet_tstamp(next_holder->mPacket, &next_tstamp);
     658               0 :       if (r == -1) {
     659               0 :         return false;
     660                 :       }
     661               0 :       mVideoPackets.PushFront(next_holder.disown());
     662                 :     } else {
     663               0 :       ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
     664                 :       nsBuiltinDecoderStateMachine* s =
     665               0 :         static_cast<nsBuiltinDecoderStateMachine*>(mDecoder->GetStateMachine());
     666               0 :       PRInt64 endTime = s->GetEndMediaTime();
     667               0 :       if (endTime == -1) {
     668               0 :         return false;
     669                 :       }
     670               0 :       next_tstamp = endTime * NS_PER_USEC;
     671                 :     }
     672                 :   }
     673                 : 
     674               0 :   PRInt64 tstamp_usecs = tstamp / NS_PER_USEC;
     675               0 :   for (PRUint32 i = 0; i < count; ++i) {
     676                 :     unsigned char* data;
     677                 :     size_t length;
     678               0 :     r = nestegg_packet_data(packet, i, &data, &length);
     679               0 :     if (r == -1) {
     680               0 :       return false;
     681                 :     }
     682                 : 
     683                 :     vpx_codec_stream_info_t si;
     684               0 :     memset(&si, 0, sizeof(si));
     685               0 :     si.sz = sizeof(si);
     686               0 :     vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si);
     687               0 :     if (aKeyframeSkip && (!si.is_kf || tstamp_usecs < aTimeThreshold)) {
     688                 :       // Skipping to next keyframe...
     689               0 :       parsed++; // Assume 1 frame per chunk.
     690               0 :       continue;
     691                 :     }
     692                 : 
     693               0 :     if (aKeyframeSkip && si.is_kf) {
     694               0 :       aKeyframeSkip = false;
     695                 :     }
     696                 : 
     697               0 :     if (vpx_codec_decode(&mVP8, data, length, NULL, 0)) {
     698               0 :       return false;
     699                 :     }
     700                 : 
     701                 :     // If the timestamp of the video frame is less than
     702                 :     // the time threshold required then it is not added
     703                 :     // to the video queue and won't be displayed.
     704               0 :     if (tstamp_usecs < aTimeThreshold) {
     705               0 :       parsed++; // Assume 1 frame per chunk.
     706               0 :       continue;
     707                 :     }
     708                 : 
     709               0 :     vpx_codec_iter_t  iter = NULL;
     710                 :     vpx_image_t      *img;
     711                 : 
     712               0 :     while ((img = vpx_codec_get_frame(&mVP8, &iter))) {
     713               0 :       NS_ASSERTION(img->fmt == IMG_FMT_I420, "WebM image format is not I420");
     714                 : 
     715                 :       // Chroma shifts are rounded down as per the decoding examples in the VP8 SDK
     716                 :       VideoData::YCbCrBuffer b;
     717               0 :       b.mPlanes[0].mData = img->planes[0];
     718               0 :       b.mPlanes[0].mStride = img->stride[0];
     719               0 :       b.mPlanes[0].mHeight = img->d_h;
     720               0 :       b.mPlanes[0].mWidth = img->d_w;
     721                 : 
     722               0 :       b.mPlanes[1].mData = img->planes[1];
     723               0 :       b.mPlanes[1].mStride = img->stride[1];
     724               0 :       b.mPlanes[1].mHeight = img->d_h >> img->y_chroma_shift;
     725               0 :       b.mPlanes[1].mWidth = img->d_w >> img->x_chroma_shift;
     726                 :  
     727               0 :       b.mPlanes[2].mData = img->planes[2];
     728               0 :       b.mPlanes[2].mStride = img->stride[2];
     729               0 :       b.mPlanes[2].mHeight = img->d_h >> img->y_chroma_shift;
     730               0 :       b.mPlanes[2].mWidth = img->d_w >> img->x_chroma_shift;
     731                 :   
     732               0 :       nsIntRect picture = mPicture;
     733               0 :       if (img->d_w != static_cast<PRUint32>(mInitialFrame.width) ||
     734                 :           img->d_h != static_cast<PRUint32>(mInitialFrame.height)) {
     735                 :         // Frame size is different from what the container reports. This is legal
     736                 :         // in WebM, and we will preserve the ratio of the crop rectangle as it
     737                 :         // was reported relative to the picture size reported by the container.
     738               0 :         picture.x = (mPicture.x * img->d_w) / mInitialFrame.width;
     739               0 :         picture.y = (mPicture.y * img->d_h) / mInitialFrame.height;
     740               0 :         picture.width = (img->d_w * mPicture.width) / mInitialFrame.width;
     741               0 :         picture.height = (img->d_h * mPicture.height) / mInitialFrame.height;
     742                 :       }
     743                 : 
     744                 :       VideoData *v = VideoData::Create(mInfo,
     745                 :                                        mDecoder->GetImageContainer(),
     746               0 :                                        holder->mOffset,
     747                 :                                        tstamp_usecs,
     748                 :                                        next_tstamp / NS_PER_USEC,
     749                 :                                        b,
     750                 :                                        si.is_kf,
     751                 :                                        -1,
     752               0 :                                        picture);
     753               0 :       if (!v) {
     754               0 :         return false;
     755                 :       }
     756               0 :       parsed++;
     757               0 :       decoded++;
     758               0 :       NS_ASSERTION(decoded <= parsed,
     759                 :         "Expect only 1 frame per chunk per packet in WebM...");
     760               0 :       mVideoQueue.Push(v);
     761                 :     }
     762                 :   }
     763                 : 
     764               0 :   return true;
     765                 : }
     766                 : 
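// --- Worked example (illustrative numbers, not part of nsWebMReader.cpp) ----
// Crop rectangle rescaling in DecodeVideoFrame above: if the container
// reported a 640x480 frame with crop rect (x=8, y=8, w=624, h=464) and a
// later frame arrives at 320x240, the rect scales proportionally:
//
//   x      = 8   * 320 / 640 = 4
//   y      = 8   * 240 / 480 = 4
//   width  = 320 * 624 / 640 = 312
//   height = 240 * 464 / 480 = 232
//
// i.e. the crop keeps the same relative position and size within the frame.
// ----------------------------------------------------------------------------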
     767               0 : nsresult nsWebMReader::Seek(PRInt64 aTarget, PRInt64 aStartTime, PRInt64 aEndTime,
     768                 :                             PRInt64 aCurrentTime)
     769                 : {
     770               0 :   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
     771                 : 
     772               0 :   LOG(PR_LOG_DEBUG, ("%p About to seek to %fs", mDecoder, aTarget/1000000.0));
     773               0 :   if (NS_FAILED(ResetDecode())) {
     774               0 :     return NS_ERROR_FAILURE;
     775                 :   }
     776               0 :   PRUint32 trackToSeek = mHasVideo ? mVideoTrack : mAudioTrack;
     777               0 :   int r = nestegg_track_seek(mContext, trackToSeek, aTarget * NS_PER_USEC);
     778               0 :   if (r != 0) {
     779               0 :     return NS_ERROR_FAILURE;
     780                 :   }
     781               0 :   return DecodeToTarget(aTarget);
     782                 : }
     783                 : 
     784               0 : nsresult nsWebMReader::GetBuffered(nsTimeRanges* aBuffered, PRInt64 aStartTime)
     785                 : {
     786               0 :   MediaResource* resource = mDecoder->GetResource();
     787                 : 
     788                 :   uint64_t timecodeScale;
     789               0 :   if (!mContext || nestegg_tstamp_scale(mContext, &timecodeScale) == -1) {
     790               0 :     return NS_OK;
     791                 :   }
     792                 : 
     793                 :   // Special case completely cached files.  This also handles local files.
     794               0 :   if (resource->IsDataCachedToEndOfResource(0)) {
     795               0 :     uint64_t duration = 0;
     796               0 :     if (nestegg_duration(mContext, &duration) == 0) {
     797               0 :       aBuffered->Add(0, duration / NS_PER_S);
     798                 :     }
     799                 :   } else {
      800                 :     // Query the resource (fetched above) for the byte ranges it has cached.
     801               0 :     nsTArray<MediaByteRange> ranges;
     802               0 :     nsresult res = resource->GetCachedRanges(ranges);
     803               0 :     NS_ENSURE_SUCCESS(res, res);
     804                 : 
     805               0 :     PRInt64 startTimeOffsetNS = aStartTime * NS_PER_USEC;
     806               0 :     for (PRUint32 index = 0; index < ranges.Length(); index++) {
     807                 :       mBufferedState->CalculateBufferedForRange(aBuffered,
     808               0 :                                                 ranges[index].mStart,
     809               0 :                                                 ranges[index].mEnd,
     810                 :                                                 timecodeScale,
     811               0 :                                                 startTimeOffsetNS);
     812                 :     }
     813                 :   }
     814                 : 
     815               0 :   return NS_OK;
     816                 : }
     817                 : 
     818               0 : void nsWebMReader::NotifyDataArrived(const char* aBuffer, PRUint32 aLength, PRInt64 aOffset)
     819                 : {
     820               0 :   mBufferedState->NotifyDataArrived(aBuffer, aLength, aOffset);
     821               0 : }

Generated by: LCOV version 1.7