diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml new file mode 100644 index 0000000000..c9b6c9d450 --- /dev/null +++ b/.github/workflows/stale.yaml @@ -0,0 +1,20 @@ +name: Close stale issues and PRs +on: + workflow_dispatch: {} + schedule: + - cron: "30 1 * * *" + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v8 + with: + stale-issue-message: "This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days." + stale-pr-message: "This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days." + close-issue-message: "This issue was closed because it has been stalled for 5 days with no activity." + close-pr-message: "This PR was closed because it has been stalled for 10 days with no activity." + days-before-issue-stale: 30 + days-before-pr-stale: 45 + days-before-issue-close: 5 + days-before-pr-close: 10 \ No newline at end of file diff --git a/components/Card.tsx b/components/Card.tsx index 226383e9f7..8836b3f97e 100644 --- a/components/Card.tsx +++ b/components/Card.tsx @@ -1,5 +1,6 @@ import { ArrowRightIcon } from '@100mslive/react-icons'; import { Flex, Box, Text } from '@100mslive/react-ui'; +import { AppAnalytics } from '../lib/publishEvents'; interface CardProps { icon: any; @@ -17,7 +18,7 @@ const Card: React.FC = ({ icon, title, link, subText, id, cta = 'Read justify="between" onClick={() => { if (link) { - window.analytics.track('card.clicked', { + AppAnalytics.track('card.clicked', { title, link, currentPage: window.location.href diff --git a/components/ChipDropDown.tsx b/components/ChipDropDown.tsx index cc2405df05..613aa786c4 100644 --- a/components/ChipDropDown.tsx +++ b/components/ChipDropDown.tsx @@ -6,6 +6,7 @@ import useClickOutside from '@/lib/useClickOutside'; import { getUpdatedPlatformName } from '@/lib/utils'; import Chip from './Chip'; import { menuItem } 
from './Sidebar'; +import { AppAnalytics } from '../lib/publishEvents'; const ChipDropDown = ({ openFilter, @@ -35,7 +36,7 @@ const ChipDropDown = ({ { - window.analytics.track('platform.changed', { + AppAnalytics.track('platform.changed', { title: document.title, referrer: document.referrer, path: window.location.hostname, diff --git a/components/Code.tsx b/components/Code.tsx index 3a1a04415e..41c2b84825 100644 --- a/components/Code.tsx +++ b/components/Code.tsx @@ -1,5 +1,6 @@ import React, { PropsWithChildren } from 'react'; import { Box } from '@100mslive/react-ui'; +import { AppAnalytics } from '../lib/publishEvents'; export const CopyIcon = () => ( ( ); -const Code: React.FC> = - ({ children, section, sectionIndex, tab }) => { - const textRef = React.useRef(null); +const Code: React.FC< + PropsWithChildren<{ section?: string; sectionIndex?: number; tab?: string }> +> = ({ children, section, sectionIndex, tab }) => { + const textRef = React.useRef(null); - const copyFunction = () => { - setCopy(true); - // @ts-ignore - navigator.clipboard.writeText(textRef.current.textContent); - setTimeout(() => { - setCopy(false); - }, 2000); + const copyFunction = () => { + setCopy(true); + // @ts-ignore + navigator.clipboard.writeText(textRef.current.textContent); + setTimeout(() => { + setCopy(false); + }, 2000); - window.analytics.track('copy.to.clipboard', { - title: document.title, - referrer: document.referrer, - path: window.location.hostname, - pathname: window.location.pathname, - href: window.location.href, - section, - sectionIndex, - tab - }); - }; - const [copy, setCopy] = React.useState(false); + AppAnalytics.track('copy.to.clipboard', { + title: document.title, + referrer: document.referrer, + path: window.location.hostname, + pathname: window.location.pathname, + href: window.location.href, + section, + sectionIndex, + tab + }); + }; + const [copy, setCopy] = React.useState(false); - return ( -
-                
-                    
-                        {!copy ? (
-                            
-                        ) : (
-                            
-                        )}
-                    
-                    
-                        {children}
-                    
-                    
+    return (
+        
+            
+                
+                    {!copy ? (
+                        
+                    ) : (
+                        
+                    )}
                 
-            
- ); - }; + + {children} + + +
+
+ ); +}; export default Code; diff --git a/components/ExampleCard.tsx b/components/ExampleCard.tsx index 4e691ccf63..7684f80b5c 100644 --- a/components/ExampleCard.tsx +++ b/components/ExampleCard.tsx @@ -1,6 +1,8 @@ +import React from 'react'; import * as reactIcons from '@100mslive/react-icons'; import { Box, Flex, HorizontalDivider, Text } from '@100mslive/react-ui'; import { Technologies, technologyIconMap } from './TechnologySelect'; +import { AppAnalytics } from '../lib/publishEvents'; interface Props extends React.ComponentPropsWithoutRef { title: string; @@ -115,34 +117,33 @@ function IconList({ technologies, showIcon }: IconListProps) { {technology} ); - } else { - return ( - - {technologies.map((technology) => { - let Icon; - const iconNameOrPath = technologyIconMap[technology].icon; - if ( - typeof iconNameOrPath === 'string' && - reactIcons[iconNameOrPath] !== undefined - ) { - Icon = reactIcons[iconNameOrPath]; - } else { - Icon = iconNameOrPath; - } - return ( - - - - ); - })} - - ); } + return ( + + {technologies.map((technology) => { + let Icon; + const iconNameOrPath = technologyIconMap[technology].icon; + if ( + typeof iconNameOrPath === 'string' && + reactIcons[iconNameOrPath] !== undefined + ) { + Icon = reactIcons[iconNameOrPath]; + } else { + Icon = iconNameOrPath; + } + return ( + + + + ); + })} + + ); } type TagListProps = { @@ -156,7 +157,7 @@ function TagList({ tags, title }: TagListProps) { {tags.map((tag) => ( { - window.analytics.track('examples.tag.clicked', { + AppAnalytics.track('examples.tag.clicked', { tag, title }); diff --git a/components/Feedback.tsx b/components/Feedback.tsx index 476fa25ba9..2624e46d40 100644 --- a/components/Feedback.tsx +++ b/components/Feedback.tsx @@ -2,6 +2,7 @@ import React from 'react'; import { Flex, Box, Button, Text } from '@100mslive/react-ui'; import useClickOutside from '@/lib/useClickOutside'; import { currentUser } from '../lib/currentUser'; +import { AppAnalytics } from '../lib/publishEvents'; 
const emojis = [{ score: 1 }, { score: 2 }, { score: 3 }, { score: 4 }]; @@ -47,11 +48,11 @@ const Feedback = () => { title={getPlaceholder[`title-${id + 1}`]} style={{ position: 'relative', width: '24px', height: '24px' }} key={emoji.score} - role='button' + role="button" onClick={() => { const userDetails = currentUser(); if (showTextBox === false) { - window.analytics.track('docs.feedback.rating', { + AppAnalytics.track('docs.feedback.rating', { title: document.title, referrer: document.referrer, path: window.location.pathname, @@ -59,7 +60,7 @@ const Feedback = () => { timeStamp: new Date().toLocaleString(), customer_id: userDetails?.customer_id, user_id: userDetails?.user_id, - email: userDetails?.email, + email: userDetails?.email }); setFirstSelection(emoji.score); } @@ -121,7 +122,7 @@ const Feedback = () => { }} onClick={() => { const userDetails = currentUser(); - window.analytics.track('docs.feedback.message', { + AppAnalytics.track('docs.feedback.message', { title: document.title, message: message || '', rating: firstSelection, diff --git a/components/Header.tsx b/components/Header.tsx index 0ba030ff0e..4853f25195 100644 --- a/components/Header.tsx +++ b/components/Header.tsx @@ -1,5 +1,4 @@ import React, { useEffect, useState } from 'react'; -import UtmLinkWrapper from './UtmLinkWrapper'; import { useRouter } from 'next/router'; import { CrossIcon, @@ -10,11 +9,13 @@ import { SearchIcon } from '@100mslive/react-icons'; import { Flex, Text, useTheme } from '@100mslive/react-ui'; -import ActiveLink, { ActiveLinkProps } from './ActiveLink'; -import SearchModal from './SearchModal'; import { WebsiteLink, DashboardLink, GitHubLink, DiscordLink, ContactLink } from '@/lib/utils'; import { references } from 'api-references'; import { exposedPlatformNames } from 'common'; +import SearchModal from './SearchModal'; +import ActiveLink, { ActiveLinkProps } from './ActiveLink'; +import UtmLinkWrapper from './UtmLinkWrapper'; +import { AppAnalytics } from 
'../lib/publishEvents'; import { NavAPIReference } from './NavAPIReference'; interface Props { @@ -112,7 +113,7 @@ const Header: React.FC = ({ target="_blank" rel="noreferrer" onClick={() => - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId: 'logo.clicked', currentPage: window.location.href }) @@ -124,7 +125,7 @@ const Header: React.FC = ({ - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId: 'docs.clicked', currentPage: window.location.href }) @@ -136,7 +137,7 @@ const Header: React.FC = ({ - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId: 'examples.clicked', currentPage: window.location.href }) @@ -171,7 +172,7 @@ const Header: React.FC = ({ noHighlight target="_blank" onClick={() => - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId: '100ms.live.clicked', currentPage: window.location.href }) @@ -185,7 +186,7 @@ const Header: React.FC = ({ noHighlight target="_blank" onClick={() => - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId: 'sales.clicked', currentPage: window.location.href }) @@ -198,7 +199,7 @@ const Header: React.FC = ({ noHighlight target="_blank" onClick={() => - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId: 'dashboard.clicked', currentPage: window.location.href }) @@ -212,7 +213,7 @@ const Header: React.FC = ({ target="_blank" rel="noreferrer" onClick={() => - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId: 'discord.clicked', currentPage: window.location.href }) @@ -231,7 +232,7 @@ const Header: React.FC = ({ target="_blank" rel="noreferrer" onClick={() => - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId: 'github.clicked', currentPage: window.location.href }) @@ -279,43 +280,41 @@ const HeaderLink = ({ children, noHighlight, ...rest -}: 
React.PropsWithChildren>) => { - return ( - - {(className) => ( - - {children} - - )} - - ); -}; +}: React.PropsWithChildren>) => ( + + {(className) => ( + + {children} + + )} + +); diff --git a/components/MDXComponents.tsx b/components/MDXComponents.tsx index afedc8d742..0d04eafc87 100644 --- a/components/MDXComponents.tsx +++ b/components/MDXComponents.tsx @@ -27,6 +27,7 @@ import { PortraitImage } from './PortraitImage'; import { CollapsibleRoot, CollapsiblePreview, CollapsibleContent } from './CollapsibleSection'; import { CollapsibleStep } from './CollapsibleStep'; import SuggestedBlogs from './SuggestedBlogs'; +import { AppAnalytics } from '@/lib/publishEvents'; const CodeCustom = (props: any) => {props.children}; @@ -71,7 +72,7 @@ const LinkCustom = (props) => { rel="noopener noreferrer" href={href} onClick={() => - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId, componentId: window?.location?.pathname.split('/')?.[2], // splitArr = ['', 'docs', 'sdk'] page: window?.location?.pathname diff --git a/components/SearchModal.tsx b/components/SearchModal.tsx index d090d8cf92..1caa05f0b5 100644 --- a/components/SearchModal.tsx +++ b/components/SearchModal.tsx @@ -1,15 +1,16 @@ import React, { useEffect, useRef, useState } from 'react'; import Image from 'next/image'; -import UtmLinkWrapper from './UtmLinkWrapper'; import { SearchIcon, ArrowRightIcon } from '@100mslive/react-icons'; import { Flex, Box, Text } from '@100mslive/react-ui'; import useClickOutside from '@/lib/useClickOutside'; import algoliasearch from 'algoliasearch/lite'; import { InstantSearch, connectHits, connectSearchBox, Configure } from 'react-instantsearch-dom'; +import { titleCasing } from '@/lib/utils'; import Tag from './Tag'; +import UtmLinkWrapper from './UtmLinkWrapper'; import Chip from './Chip'; import ChipDropDown from './ChipDropDown'; -import { titleCasing } from '@/lib/utils'; +import { AppAnalytics } from '../lib/publishEvents'; const 
searchClient = algoliasearch( process.env.NEXT_PUBLIC_ALGOLIA_APP_ID || '', @@ -25,14 +26,25 @@ const searchInfoItems = [ { title: 'to navigate', content: [ - , - + , + ] }, { title: 'to select', content: [ { if (hits.length === 0) { - window.analytics.track('no.results', { + AppAnalytics.track('no.results', { title: document.title, referrer: document.referrer, path: window.location.hostname, @@ -204,7 +216,7 @@ const ResultBox = ({ borderRadius: '$0' }} onClick={() => { - window.analytics.track('docs.search.result.clicked', { + AppAnalytics.track('docs.search.result.clicked', { totalNumberOfResults: hits?.length, textInSearch: searchTerm || '', rankOfSearchResult: i + 1, @@ -412,7 +424,7 @@ const SearchModal: React.FC = ({ setModal }) => { }, [hitsCount, searchTerm]); useClickOutside(ref, () => { - window.analytics.track('docs.search.dismissed', { + AppAnalytics.track('docs.search.dismissed', { textInSearch: searchTerm || '', totalNumberOfResults: hitsCount, referrer: document.referrer, @@ -515,7 +527,7 @@ const FilterBar = ({ onClick={() => { if (typeFilter === type) setTypeFilter(ALL_TYPES); else { - window.analytics.track('type.changed', { + AppAnalytics.track('type.changed', { title: document.title, referrer: document.referrer, path: window.location.hostname, @@ -540,8 +552,7 @@ const FilterBar = ({ ); -const getFilterQuery = (platformFilter, typeFilter) => { - return `${platformFilter === ALL_PLATFORMS ? 'NOT ' : ''}platformName:"${platformFilter}" AND ${ +const getFilterQuery = (platformFilter, typeFilter) => + `${platformFilter === ALL_PLATFORMS ? 'NOT ' : ''}platformName:"${platformFilter}" AND ${ typeFilter === ALL_TYPES ? 
'NOT ' : '' }type:"${typeFilter}"`; -}; diff --git a/components/SegmentAnalytics.tsx b/components/SegmentAnalytics.tsx index 38a09b6abd..51e34a1642 100644 --- a/components/SegmentAnalytics.tsx +++ b/components/SegmentAnalytics.tsx @@ -1,5 +1,5 @@ import React from 'react'; - +import { AppAnalytics } from '../lib/publishEvents'; const SegmentAnalytics = ({ title, options }) => { React.useEffect(() => { if (typeof window !== 'undefined') { @@ -12,7 +12,7 @@ const SegmentAnalytics = ({ title, options }) => { }, {}); // @ts-ignore const url = new URL(window.location.href); - window.analytics.page(title, { + AppAnalytics.page(title, { ...params, ...options, title, @@ -26,7 +26,7 @@ const SegmentAnalytics = ({ title, options }) => { utm_keyword: url.searchParams.get('utm_keyword'), utm_term: url.searchParams.get('utm_term') }); - window.analytics.track('page.viewed', { + AppAnalytics.track('page.viewed', { ...params, ...options, title: document.title, diff --git a/components/Sidebar.tsx b/components/Sidebar.tsx index bde4ddcf4f..2f7929b899 100644 --- a/components/Sidebar.tsx +++ b/components/Sidebar.tsx @@ -1,7 +1,6 @@ /* eslint-disable react/no-array-index-key */ import React, { useEffect, useState, useRef } from 'react'; import { useRouter } from 'next/router'; -import UtmLinkWrapper from './UtmLinkWrapper'; import FlutterIcon from '@/assets/FlutterIcon'; import AndroidIcon from '@/assets/icons/AndroidIcon'; import IosIcon from '@/assets/icons/IosIcon'; @@ -26,9 +25,11 @@ import { import { Listbox } from '@headlessui/react'; import { Flex, Box, Text, CSS } from '@100mslive/react-ui'; import { getUpdatedPlatformName } from '@/lib/utils'; +import { AppAnalytics } from '../lib/publishEvents'; import SidebarSection from './SidebarSection'; import ReleaseNotes from './ReleaseNotes'; import PlatformAccordion from './PlatformAccordion'; +import UtmLinkWrapper from './UtmLinkWrapper'; const accordionIconStyle = { height: '24px', width: '24px', color: 'inherit' }; @@ -141,7 
+142,7 @@ const Sidebar: React.FC = ({ const changeTech = (s) => { setTech((prevSelection) => { - window.analytics.track('link.clicked', { + AppAnalytics.track('link.clicked', { btnId: 'platform.switched', switchedTo: s.name, switchedFrom: prevSelection.name, @@ -334,7 +335,7 @@ const Sidebar: React.FC = ({ }} onClick={() => { setShowBaseView(true); - window.analytics.track('btn.clicked', { + AppAnalytics.track('btn.clicked', { btnId: 'content.overview.clicked', currentPage: window.location.href }); diff --git a/components/SuggestedBlogs.tsx b/components/SuggestedBlogs.tsx index 03fd0754dc..8df674dd12 100644 --- a/components/SuggestedBlogs.tsx +++ b/components/SuggestedBlogs.tsx @@ -1,6 +1,8 @@ import React from 'react'; import { Box, Text } from '@100mslive/react-ui'; import { ExternalLinkIcon } from '@100mslive/react-icons'; +import { AppAnalytics } from '../lib/publishEvents'; + interface Props { suggestedBlogs: Array<{ title: string; @@ -54,7 +56,7 @@ const SuggestedBlogs: React.FC = ({ suggestedBlogs }) => { } }} onClick={() => { - window.analytics.track('docs.blog.redirect', { + AppAnalytics.track('docs.blog.redirect', { type: 'blog_redirect', blog_title: blog.title, path: window.location.pathname, diff --git a/docs/android/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx b/docs/android/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx index 771bd3f3b9..183db57ff3 100644 --- a/docs/android/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx +++ b/docs/android/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx @@ -14,8 +14,10 @@ implementation "live.100ms:hms-noise-cancellation-android:$hmsVersion" ``` 2. Toggle noise cancellation on in your application with `hmsSDK.setNoiseCancellationEnabled(true)` in your `onJoin` callback. -> Note: Prebuilt also supports noise cancellation, to enable it add the import as above and ensure it's enabled from your prebuilt dashboard. 
From the [dashboard](https://dashboard.100ms.live/), select the template, go to "Customize Prebuilt" -> "Screens and Components" -> "Noise Cancellation State". -> You will also need to toggle it in the [dashboard's](https://dashboard.100ms.live/)"Template" -> "Advanced Settings" -> "Noise Cancellation" + +**IMPORTANT**
+Enable Noise Cancellation in the template configuration. Learn more about enabling this feature from [here](/get-started/v2/get-started/features/noise-cancellation#enabling-the-noise-cancellation) +
> Note: Adding the library for noise cancellation will increase app size by 5.6 Mb. Noise cancellation is turned off by default for all calls. diff --git a/docs/flutter/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx b/docs/flutter/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx index 321f7a63ad..ef9123c75e 100644 --- a/docs/flutter/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx +++ b/docs/flutter/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx @@ -23,7 +23,10 @@ The Noise Cancellation feature employs a sophisticated AI model trained specific `hmssdk_flutter` version 1.10.0 or higher is required to utilize the Noise Cancellation feature in your Flutter application. -Also, this feature has gated access currently. To enable Noise Cancellation in your Rooms, reach out to **support@100ms.live** or connect with us on [100ms Discord](https://discord.com/invite/kGdmszyzq2). + +**IMPORTANT**
+Enable Noise Cancellation in the template configuration. Learn more about enabling this feature from [here](/get-started/v2/get-started/features/noise-cancellation#enabling-the-noise-cancellation) +
## Usage diff --git a/docs/flutter/v2/quickstart/token-endpoint.mdx b/docs/flutter/v2/quickstart/token-endpoint.mdx deleted file mode 100644 index 37dd3f21b6..0000000000 --- a/docs/flutter/v2/quickstart/token-endpoint.mdx +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Auth Token Endpoint Guide -nav: 2.4 ---- - -## Overview - -100ms provides an option to get `Auth Tokens` without setting up a token generation backend service to simplify your integration journey while testing the [sample app](https://github.com/100mslive/100ms-web) or building integration with 100ms. - -You can find the token endpoint from the [developer page](https://dashboard.100ms.live/developer) in your 100ms dashboard. - -![Token endpoint](/guides/token-endpoint-dashboard.png) - -We recommend you move to your token generation service before you transition your app to production, as our token endpoint service will not scale in production. - -The "Sample Apps" built using 100ms client SDKs require an `Auth Token` to join a room to initiate a video conferencing or live streaming session. Please check the [Authentication and Tokens guide](/flutter/v2/foundation/security-and-tokens) - -Please note that you cannot use the token endpoint to create a `Management Token` for server APIs. Refer to the [Management Token section](/flutter/v2/foundation/security-and-tokens#management-token) in Authentication and Tokens guide for more information. - -## Get an auth token using token endpoint - -You can use the token endpoint from your 100ms dashboard while building integration with 100ms. This acts as a tool enabling front-end developers to complete the integration without depending on the backend developers to set up a token generation backend service. - -**URL format:** `api/token` - -100ms token endpoint can generate an Auth token with the inputs passed, such as room_id, role, & user_id (optional - your internal user identifier as the peer's user_id). 
You can use [jwt.io](https://jwt.io/) to validate whether the Auth token contains the same input values. - - - - -```bash -curl --location --request POST 'https://prod-in2.100ms.live/hmsapi/johndoe.app.100ms.live/api/token' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "room_id":"633fcdd84208780bf665346a", - "role":"host", - "user_id":"1234" -}' -``` - - - - -```json -{ - "token": "eyJ0eXAiOiJKV1QiLCJhbGciOi***************************R3tT-Yk", - "msg": "token generated successfully", - "status": 200, - "success": true, - "api_version": "2.0.192" -} -``` - - - -### Example client-side implementation - -You can directly add this to your client-side implementation, check our [sample app](https://github.com/100mslive/100ms-flutter/blob/0d4c3b5409003932d80cb19f67027a63424169e7/example/lib/service/room_service.dart#L8) for reference. - -### Disable 100ms token endpoint - -Due to some security concerns, if you don't wish to use the token endpoint to generate Auth tokens, then you can disable it on the [Developers page](https://dashboard.100ms.live/developer) on your dashboard by disabling the option "Disable <room_id>/<role> link format." 
- -![Disable Token endpoint](/guides/disable-token-endpoint.png) - -#### Error Response - -Once you're disabled it on the dashboard, the requests to create an Auth token using the 100ms token endpoint will throw the below error: - -```json -{ - "success": false, - "msg": "Generating token using the room_id and role is disabled.", - "api_version": "2.0.192" -} -``` diff --git a/docs/get-started/v2/get-started/features/noise-cancellation.mdx b/docs/get-started/v2/get-started/features/noise-cancellation.mdx new file mode 100644 index 0000000000..565ab3f5a1 --- /dev/null +++ b/docs/get-started/v2/get-started/features/noise-cancellation.mdx @@ -0,0 +1,98 @@ +--- +title: Noise Cancellation +nav: 3.9 +--- + +The Noise Cancellation feature is an invaluable tool designed to enhance the audio quality in scenarios such as conferences, live streams, and recordings where unwanted background noise can degrade the listening experience. + +## Key Benefits + +- **Enhanced Audio Quality**: Eliminates unwanted noise, including background chatter, clicks, claps, barking, and other sudden audio disturbances, resulting in a more pleasant listening experience for your audience. + +- **Improved Clarity**: Ensures that the primary audio content remains prominent and intelligible by reducing distractions caused by ambient noise. + +- **Optimized Communication**: Facilitates seamless communication in conferences and live streams by minimizing disruptions caused by environmental factors, thereby enhancing the overall professionalism of the presentation. + +This is a guide to enabling and using the Noise Cancellation on 100ms. + + + + + +### Getting Started +100ms Noise Cancellation is powered by [krisp.ai](https://krisp.ai/),ensuring clear communication by filtering out background noise. + + +**IMPORTANT**
+This is an add-on paid feature, for details check [100ms pricing page](https://www.100ms.live/pricing/) +
+ +### Enabling Noise Cancellation + +Noise Cancellation can be configured from the 100ms dashboard. + +#### Enabling Noise Cancellation at template level + +1. Navigate to a specific Template where you wish to enable the Noise Cancellation. +2. Click on **‘Advanced Settings’** tab in the Template configuration. +3. Enable ‘**Noise Cancellation**’. + + + + +By default, noise cancellation is enabled for all peers and roles for all the new templates. For existing templates, to enable Noise Cancellation by default for all the peers and roles check below section. + + +#### Enabling Noise Cancellation by default in preview + +For certain roles, you would like to enable Noise Cancellation by default from preview state. First enable the Noise Cancellation on Template as mentioned above. + +1. Navigate to a specific template where you wish to enable the Noise Cancellation. +2. Click on **'Customise Prebuilt'** on top right in the Template. +3. Click on **'Screens and Components'** and select the role where you wish to enable the Noise Cancellation by default +4. Enable **'Noise Cancellation State'**. + + + +### Integrating the Noise Cancellation +Noise Cancellation is available across all platforms (iOS, Android, Flutter, React Native and Web). Refer to the following platform SDK specific guides: +- [iOS](/ios/v2/how-to-guides/extend-capabilities/plugins/noise-cancellation) +- [Android](/android/v2/how-to-guides/extend-capabilities/noise-cancellation) +- [React Native](/react-native/v2/how-to-guides/extend-capabilities/noise-cancellation) +- [Flutter](/flutter/v2/how-to-guides/extend-capabilities/noise-cancellation) +- [Web](/javascript/v2/how-to-guides/extend-capabilities/plugins/krisp-noise-cancellation) + +### Using Noise Cancellation +Once Noise Cancellation is enabled and saved from the template configuration, the Noise Cancellation can be used across devices. Noise Cancellation can be activated or deactivated in the room or during the preview screen. + + + +
+ +### Frequently Asked Questions (FAQ) + +1. **Is Noise Cancellation a chargeable feature?** + + Yes, 100ms Noise Cancellation is charged based on per peer usage minutes. This means that for every peer that is enabling Noise Cancellation during session, their individual usage minutes will be aggregated. For more information, kindly check [100ms pricing page](https://www.100ms.live/pricing/). + +2. **Is Noise Cancellation available in prebuilt?** + + Yes, 100ms Prebuilt supports Noise Cancellation out of the box. Enable the Noise Cancellation as mentioned above. + +3. **How to track Noise Cancellation usage?** + + The usage can be tracked from the Usage Overview section on the [100ms Dashboard](https://dashboard.100ms.live/dashboard). \ No newline at end of file diff --git a/docs/get-started/v2/get-started/features/ui-composition.mdx b/docs/get-started/v2/get-started/features/ui-composition.mdx index ede3fc7b7b..95ae86513e 100644 --- a/docs/get-started/v2/get-started/features/ui-composition.mdx +++ b/docs/get-started/v2/get-started/features/ui-composition.mdx @@ -19,7 +19,7 @@ By default, 100ms live streams and recordings use [100ms pre-built links](../pre Pre-built links are easy to use, but can be limited in customization. Since Beam can open any web app, you can customize the composition UI with HTML/CSS/JavaScript. -You can start from scratch or use the [100ms sample web app](../../../../javascript/v2/quickstart/react-sample-app/quickstart) (which is used by pre-built links) as a starting point for this customization. +You can start from scratch or use the [100ms sample web app](../../../../javascript/v2/quickstart/prebuilt-quickstart) (which is used by pre-built links) as a starting point for this customization. 
### Things to note diff --git a/docs/get-started/v2/get-started/security-and-privacy/100ms-policy-on-security-and-privacy.mdx b/docs/get-started/v2/get-started/security-and-privacy/100ms-policy-on-security-and-privacy.mdx index 8d7649db64..189926caa4 100644 --- a/docs/get-started/v2/get-started/security-and-privacy/100ms-policy-on-security-and-privacy.mdx +++ b/docs/get-started/v2/get-started/security-and-privacy/100ms-policy-on-security-and-privacy.mdx @@ -44,6 +44,7 @@ nav: 9.1 - 100ms minimizes collection of Personally Identifiable Information (PII) and has controls in place to prevent PII breaches and unauthorized access. - In addition to access-controls, monitoring, data security controls, 100ms also has third-party disclosure policies in place. - 100ms can provide COPPA (Children's Online Privacy Protection Act) compliant recordings even in multi-student classrooms by implementing custom recording workflows. +- 100ms does not use customer data to train its transcription models. However, it relies on an external service for its summarization feature. No data (customer's or 100ms') is stored, retained, or used for model training by this external service. ## Special Requests - IP whitelists, Data Residency diff --git a/docs/get-started/v2/get-started/security-and-privacy/HIPAA compliance/HIPAA-workspace.mdx b/docs/get-started/v2/get-started/security-and-privacy/HIPAA compliance/HIPAA-workspace.mdx index ad588fe470..b58dc96f08 100644 --- a/docs/get-started/v2/get-started/security-and-privacy/HIPAA compliance/HIPAA-workspace.mdx +++ b/docs/get-started/v2/get-started/security-and-privacy/HIPAA compliance/HIPAA-workspace.mdx @@ -31,7 +31,7 @@ This section outlines 100ms' security framework and technical implementation, co - 100ms does not store your video, audio or screensharing data. - All of 100ms’ video and audio calls are encrypted to and from 100ms’ SFU servers. 
Encrypted media in transit is decrypted only in the server memory, ensuring that the exposure of the decrypted stream is as minimal as possible. At the application layer, we never have access to unencrypted media. -- All audio, video, and screen sharing media are transmitted encrypted using the Secure Real-time Transport Protocol (SRTP) which are encrypted over Datagram Transport Layer Security (DTLS) with AES 256-bit encryption. +- All audio, video, and screen sharing media are transmitted and encrypted using the Secure Real-time Transport Protocol (SRTP) which is encrypted over Datagram Transport Layer Security (DTLS) with AES 256-bit encryption. - TURN servers are media relay servers only so there is no processing or storage of media. TURN servers do not and cannot decrypt the media that they relay. - Disk encryption is enabled on the servers. @@ -41,6 +41,40 @@ This section outlines 100ms' security framework and technical implementation, co - Recordings, when stored with 100ms, are stored on encrypted disk servers and deleted after 15 days. - **Only for HIPAA Workspaces** - Recording with the customer’s cloud storage bucket configured on 100ms is the only method allowed by 100ms. As soon as the recording for a particular session is complete, it is uploaded to the customers’ storage and immediately deleted from ours. - **Only for HIPAA Workspaces** - Access to customers’ buckets cannot be obtained by 100ms because write-only access is enforced when configuring the customer’s storage bucket. +- In case of a failure of processing or upload of recordings, the failed recordings are stored with 100ms' secure file storage for up to 7 days and reupload is attempted during this period. After 7 days, these files are automatically deleted. + +#### Post call transcription + +- Post call transcription is an opt-in feature, which requires call recording to be enabled by the customer. +- A speaker-labeled transcript is generated upon completion of the call recording. 
+- Transcripts are generated and processed on 100ms’ servers. No data from the transcription process is used for training any AI models. +- Transcripts are not stored by 100ms once generated. +- The audio file of the recording is securely stored within 100ms’ temporary file storage for up to 14 days in the same region as the customer’s workspace if post call transcription is enabled. This is to ensure that in case of a failure of processing or upload of the recording, transcript or summary, the data is not lost for the customer. After 14 days, the files are automatically deleted. +- The transcript itself is not stored by 100ms. In case of a failure of processing or upload, the transcript is regenerated, and a reupload is attempted. If the reupload is successful, all stored files are promptly deleted. + +#### AI-generated summary + +- AI-generated summary is an opt-in feature, which requires call recording as well as post call transcription to be enabled. +- 100ms uses an external service for generating summaries. A HIPAA Business Associate Agreement (BAA) has been signed between 100ms and the service provider. +- No data is stored by 100ms or the sub-processor providing the service. +- The data is not used for training any AI models. +- Summaries are not stored by 100ms once generated. +- The audio file of the recording is securely stored within 100ms’ temporary file storage for up to 14 days in the same region as the customer’s workspace if post call transcription is enabled. This is to ensure that in case of a failure of processing or upload of the recording, transcript or summary, the data is not lost for the customer. After 14 days, the files are automatically deleted. +- The summary itself is not stored by 100ms. In case of a failure, the transcript and the summary are regenerated, and a reupload is attempted. If the reupload is successful, all stored files are promptly deleted. 
+ +#### Closed captions + +- 100ms uses an external service for speaker-labeled closed captions. +- A HIPAA Business Associate Agreement (BAA) has been signed between 100ms and the service provider. +- No data is stored by 100ms or the sub-processor providing the service. +- The data is not used for training any AI models. + +#### Session Initiation Protocol (SIP) - Audio and Video + +- Media encryption is performed using the **AES_CM_128_HMAC_SHA1_80** cryptographic suite. +- Media is not stored at any point; all transmissions are transient. +- Phone numbers, when used, are masked and securely stored in call logs only. + #### Secure webhooks @@ -129,9 +163,13 @@ Following services and features can be **enabled** and used: 2. Composite Recording with user’s cloud storage bucket configured 1. Recording with the [customer’s cloud storage bucket configured](/get-started/v2/get-started/features/recordings/recording-assets/storage-configuration) on 100ms is the only method allowed by 100ms. As soon as the recording for a particular session is complete, it is uploaded to the customers’ storage and immediately deleted from ours. 2. Access to customers’ buckets cannot be obtained by 100ms because write-only access is enforced when configuring the customer’s storage bucket. -3. Whiteboard -4. Session Initiation Protocol (SIP) (Limited Preview Access) -5. Chat +3. Closed Captions +4. Post Call Transcription +5. AI-Generated Summaries +6. Noise Cancellation +7. Whiteboard +8. Session Initiation Protocol (SIP) - Audio and Video +9. Chat Following services within the template have been **disabled** and locked for the HIPAA Workspace: @@ -140,16 +178,12 @@ Following services within the template have been **disabled** and locked for the 3. Track Recording 4. Stream Recording 5. Live Transcription for HLS -6. Post Call Transcription -7. AI-Generated Summaries Following services are in the process of being HIPAA compliant: -1. 
Live Transcription (Video Conferencing and HLS) - Diarized and Non-diarized -2. Post Call Transcription - Speaker Labelled (Diarized) -3. AI-Generated Summaries -4. Track Recording -5. Stream Recording +1. Track Recording +2. Stream Recording +3. Custom Composite Recording #### Server Side @@ -291,6 +325,10 @@ Creating and using a HIPAA workspace doesn’t guarantee HIPAA compliance until We have signed BAAs with critical services and features which will have temporary access to the customers’ ePHI. +5. Is there any data stored and retained by 100ms or its sub-processors for the purpose of training any AI models? + + Recordings can be stored with 100ms for up to 7 days, in case of a failure of processing or upload to the customer's configured storage bucket. Transcripts or summaries aren't stored with 100ms and aren't used for training any AI models. + 5. **Can a workspace be deleted?** A workspace cannot be deleted at this point of time. If you do require this, please reach out to us through the support widget on 100ms dashboard. \ No newline at end of file diff --git a/docs/ios/v2/how-to-guides/extend-capabilities/plugins/noise-cancellation.mdx b/docs/ios/v2/how-to-guides/extend-capabilities/plugins/noise-cancellation.mdx index 769c3105d2..504abea731 100644 --- a/docs/ios/v2/how-to-guides/extend-capabilities/plugins/noise-cancellation.mdx +++ b/docs/ios/v2/how-to-guides/extend-capabilities/plugins/noise-cancellation.mdx @@ -29,6 +29,11 @@ let pathForNCModel = HMSNoiseCancellationModels.path(for: .smallFullBand) ## Minimum Requirements - Minimum 100ms SDK version required is `1.7.0` + +**IMPORTANT**<br/>
+Enable Noise Cancellation in the template configuration. Learn more about enabling this feature from [here](/get-started/v2/get-started/features/noise-cancellation#enabling-the-noise-cancellation) +
+ ## How to enable noise cancellation in your app To enable noise cancellation, you need HMSNoiseCancellationPlugin. Initialise HMSNoiseCancellationPlugin using this path to the AI model. You also pass the initial state of noise cancellation plugin as well. @@ -148,8 +153,7 @@ roomModel.isNoiseCancellationEnabled ## How to check if noise cancellation is enabled in the room -To make noise cancellation work your room needs to have noise cancellation feature enabled. You can check if noise cancellation is enabled using roomModel.isNoiseCancellationAvailable. To enable noise canellation in your rooms, reach out to support@100ms.live or 100ms discord. - +To make noise cancellation work your room needs to have noise cancellation feature enabled. You can check if it is enabled using roomModel.isNoiseCancellationAvailable. ```swift roomModel.isNoiseCancellationAvailable ``` diff --git a/docs/javascript/v2/how-to-guides/extend-capabilities/plugins/krisp-noise-cancellation.mdx b/docs/javascript/v2/how-to-guides/extend-capabilities/plugins/krisp-noise-cancellation.mdx index 41c75ed6be..95d3743599 100644 --- a/docs/javascript/v2/how-to-guides/extend-capabilities/plugins/krisp-noise-cancellation.mdx +++ b/docs/javascript/v2/how-to-guides/extend-capabilities/plugins/krisp-noise-cancellation.mdx @@ -14,6 +14,12 @@ This guide provides an overview of usage of the noise suppression plugin of 100m Minimum version requirement for `hms-video-store` - 0.11.7 + +**IMPORTANT**
+Enable Noise Cancellation in the template configuration. Learn more about enabling this feature from [here](/get-started/v2/get-started/features/noise-cancellation#enabling-the-noise-cancellation) +
+ + **Get the 100ms noise cancellation Package** ```bash section=GetHMSNoiseCancellationPackage sectionIndex=1 diff --git a/docs/javascript/v2/quickstart/embed-with-iframe.mdx b/docs/javascript/v2/quickstart/embed-with-iframe.mdx index 149e2f0895..7de7b4995d 100644 --- a/docs/javascript/v2/quickstart/embed-with-iframe.mdx +++ b/docs/javascript/v2/quickstart/embed-with-iframe.mdx @@ -20,7 +20,7 @@ If you want a quick integration of a fully featured video conferencing/live stre ## Implementation -1. When customising and deploying our sample web app, you can create unique [custom domain room links](/javascript/v2/quickstart/react-sample-app/build-and-deploy) for each role in your room. For instance, custom room links for a self-hosted app with domain "my.video.app" +1. When customising and deploying our sample web app, you can create unique [custom domain room links](/prebuilt/v2/prebuilt/room-codes/room-links#custom-domain-room-links) - host : https://`my.video.app`/meeting/`room-code` - guest : https://`my.video.app`/meeting/`room-code` diff --git a/docs/javascript/v2/quickstart/react-sample-app/build-and-deploy.mdx b/docs/javascript/v2/quickstart/react-sample-app/build-and-deploy.mdx deleted file mode 100644 index 19a57a69b1..0000000000 --- a/docs/javascript/v2/quickstart/react-sample-app/build-and-deploy.mdx +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Build and Deploy -nav: 1.08 ---- - -## Overview - -Once you have customized the app as per your needs and tested it in your local development environment, you can build and deploy the app to one of your preferred platforms like Vercel, Netlify, AWS, Web server, Docker, etc. - -## Build the app - -[100ms-web](https://github.com/100mslive/100ms-web/) app is just like any react application. To build the app, just run - -```bash -yarn build or -npm build -``` - -If everything goes well, you should see something like this in the terminal. - -```bash -webpack 5.70.0 compiled successfully in 22546 ms -✨ Done in 30.72s. 
-``` - -All the files that got built in the above step will be in the `build/` directory. This is all we need to deploy. The files are just plain HTML/CSS/JS and could be deployed in many different ways. We discuss some popular ones below. - -## Deploy the code - -There are many ways to deploy 100ms-web. We discuss a few methods below. Please feel free to reach out to us if you don't find your preferred platform here. - -- [Netlify](#netlify) -- [Vercel](#vercel) - -### Netlify - -Deploying 100ms-web to [Netlify](https://www.netlify.com/) is straightforward and same as deploying any project to Netlify. There are two ways to deploy to Netlify: - -#### Deploy via Git (Recommended) - -If you had forked 100ms-web and have customized it, it is highly likely you are using git. If you did, You can directly deploy to Netlify. Netlify supports GitHub, GitLab and Bitbucket. Netlify has great documentation on how to do it. - -- [A Step-by-Step Guide: Deploying on Netlify - ](https://www.netlify.com/blog/2016/09/29/a-step-by-step-guide-deploying-on-netlify/) -- [Netlify Tutorial - Deploying from Git (Video)](https://youtu.be/4h8B080Mv4U) - -#### Deploy the files directly - -If you want to directly upload your files, Netlify provides a very easy drag & drop interface for it. - -- [Netlify Tutorial - Drag and drop deploys on Netlify (Video)](https://youtu.be/etZ9HSUoTPU) - -> We recommend using Git in general if you are planning to use this code for production. Only use the direct method if you want to quickly test something or do some proof-of-concept. - -If you are interested in Netlify's CI/CD or just want to learn more about deploying to Netlify, please check their docs [here](https://docs.netlify.com/site-deploys/create-deploys/) - -### Vercel - -Deploying to Vercel is pretty much straightforward similar to Netlify. - -- You can start with a [New Vercel app](https://vercel.com/new). 
In that page, import your repository from git, Click next - - ![vercel_wiki_0](/docs/v2/vercel_0.png) - -- In the next page, create a team first - - ![vercel_wiki_1](/docs/v2/vercel_1.png) - -- Then in **Configure Project** section, select **Framework preset** as **Create React App** and then add any extra build settings you want to add. Make sure to add the environment variables for the app below that. For more details See [Environment Variables](https://github.com/100mslive/100ms-web/wiki/Environment-Variables) - - ![vercel_wiki_2](/docs/v2/vercel_2.png) - - > You can change the environment variables any time later in the settings page of your project dashboard. Every time you change a variable, add/remove a variable, you have to rebuild and make a new deployment for the environment variables to take effect. This is a mandatory step and must be done. - -- Now click deploy. Wait for the project to be deployed. Once it's successful, you should see a screen like this - - ![vercel_wiki_3](/docs/v2/vercel_3.png) - -- Click **Go to dashboard**. In the dashboard page, you should be able to see the app by clicking "Visit" button on the deployment screen. Click it to see your app in action. - -That's it. You are all done. diff --git a/docs/javascript/v2/quickstart/react-sample-app/customize-the-app.mdx b/docs/javascript/v2/quickstart/react-sample-app/customize-the-app.mdx deleted file mode 100644 index 6117e79aad..0000000000 --- a/docs/javascript/v2/quickstart/react-sample-app/customize-the-app.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Customize Your App -nav: 1.07 ---- - -## Overview - -This [sample app](https://github.com/100mslive/100ms-web) allows UI customization like updating logo, tile aspect ratio, theme, etc. To make these customizations, you can use various environment variables in the env file. - -## Change logo - -You can use `REACT_APP_LOGO` variable to change your logo in the sample app to customize the UI as per your brand. 
- -**Example:** - -```bash -REACT_APP_LOGO='https://example.com/public/logo.svg' -``` - -## Change tile aspect ratio - -You can use the `REACT_APP_TILE_SHAPE` variable to customize the aspect ratio of video tiles; the format is `width-height`. - -**Examples:** - -- `REACT_APP_TILE_SHAPE='1-1'`: for square tiles -- `REACT_APP_TILE_SHAPE='4-3'`: for landscape tiles -- `REACT_APP_TILE_SHAPE='16-9'`: for wide tiles -- `REACT_APP_TILE_SHAPE='9-16'`: for mobile view - -## Change theme - -- Use the `REACT_APP_THEME` variable to switch the theme between dark and light mode - - **Examples:** - - `REACT_APP_THEME='dark'`: for dark theme - - `REACT_APP_THEME='light'`: for light theme - -## Change font - -You can use `REACT_APP_FONT` variable to update the font used in the app. The font must be imported in your styling for it to work. - -**Example:** - -```bash -REACT_APP_FONT='Roboto' -``` - -## Playlist tracks (watch party) - -- You can play music or videos from a URL for everyone in the room to watch together. The support is only for file formats that are supported by the native audio and video elements, but it's super cool. - -- You can configure the list of audio/video tracks that can be played by a person in the room for everyone as follows: - - - **Example: Audio playlist** - - ```json - REACT_APP_AUDIO_PLAYLIST=[ - { - "name": "Audio1", - "id": "audio1", - "metadata": { "description": "Artist1" }, - "url": "https://d2qi07yyjujoxr.cloudfront.net/webapp/playlist/audio1.mp3", - "type": "audio" - }, - { -
- } - ] - ``` - - - **Example: Video playlist** - - ```json - REACT_APP_VIDEO_PLAYLIST=[ - { - "name": "Video2", - "id": "video2", - "url": "https://d2qi07yyjujoxr.cloudfront.net/webapp/playlist/video2.mp4", - "type": "video" - }, - { -
- } - ] - ``` diff --git a/docs/javascript/v2/quickstart/react-sample-app/quickstart.mdx b/docs/javascript/v2/quickstart/react-sample-app/quickstart.mdx deleted file mode 100644 index 29887e8754..0000000000 --- a/docs/javascript/v2/quickstart/react-sample-app/quickstart.mdx +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: Quickstart Guide -nav: 1.06 ---- - -## Overview - -[100ms-web](https://github.com/100mslive/100ms-web) is a fully featured sample application built using React. This application gives you a ready-made starting point for writing your video apps or customizing the cloned app based on your needs. - -You can use this app to test: - -- **Basic functionalities**: Video calling, Recording, Interactive live streaming (HLS), External streaming (RTMP), Screenshare, and Picture-in-Picture (PiP). -- **Interactive features**: Chat (broadcast, direct, & group), Raise hand. -- **Plugins**: Virtual background, Collaborative whiteboard. - -The app also includes other features and capabilities like custom audio/video tracks, control remote peers, network quality and performance stats, adaptive bitrate (Simulcast), and more. - -The sample app is intended to accelerate development, provide a full reference of all the features and capabilities, and demonstrate implementation with easy-to-read code. - -If you are a developer trying to build an app from scratch, please check our [quickstart guide](/javascript/v2/get-started/react-quickstart). Quickstart provides a simple and quick way to build a reference app and familiarize yourself with the different capabilities of the platform with minimal code. - -## Local development - - - - - -### Set up the app repository - -- Clone the repository from [GitHub](https://github.com/100mslive/100ms-web). - - ```bash section=cloneRepo sectionIndex=1 - git clone git@github.com:100mslive/100ms-web.git - ``` - -- Change your working directory to `100ms-web`. 
- - ```bash section=cloneRepo sectionIndex=2 - cd 100ms-web - ``` - -- Install the dependencies using `npm` or `yarn`. - - ```bash section=cloneRepo sectionIndex=3 - npm install or - yarn install - ``` - -### Configure auth token endpoint - -- Set environment variables to configure token generation endpoint. Use the following command to copy the values from "example.env" to a new file called ".env". - - ```bash section=envVariable sectionIndex=1 - cp example.env .env - ``` - -- Update the 100ms token endpoint as an environment variable to handle auth token generation. You can get your token endpoint from the [Developer section of 100ms' Dashboard](https://dashboard.100ms.live/developer). - - ![token endpoint](https://user-images.githubusercontent.com/11087313/140727818-43cd8be4-b3bf-4b34-9921-a77f9a1b819d.png) - - **Example**: - - ```bash - REACT_APP_TOKEN_GENERATION_ENDPOINT = 'https://prod-in2.100ms.live/hmsapi/example.app.100ms.live/' - ``` - -### Start and test the app - -#### Start the app - -- Start the app with the below command. - - ```bash section=runApp sectionIndex=1 - npm start or - yarn start - ``` - -- The app should now be running at [http://localhost:3000/](http://localhost:3000/). You should see a welcome message saying, "Almost There!". - -#### Create and join a room - -- To test audio/video functionality, you need to connect to a 100ms room; please check following steps for the same: - - 1. Navigate to your [100ms Dashboard](https://dashboard.100ms.live/dashboard) or [create an account](https://dashboard.100ms.live/register) if you don't have one. - 2. To test this app quickly, use the "Video Conferencing Starter Kit" to create a room with a default template assigned to it. - 3. Go to [Rooms page](https://dashboard.100ms.live/rooms) in your Dashboard, copy the "Room Id" of the room and role created in the above step. - 4. Add the "Room Id" and role to the localhost URL to test the app. 
For example, `http://localhost:3000/633fcdd84208780bf665346a/host` - -
- -
- -
- -## Next steps - -#### [Customize the app](/javascript/v2/get-started/react-sample-app/customize-the-app) - -Customize your UI like updating logo, tile aspect ratio, theme, etc. as per your brand. - -#### [Build and deploy](/javascript/v2/get-started/react-sample-app/build-and-deploy) - -Building and deploying the 100ms sample app is simple and the same as any React project. You can deploy this to Vercel, Netlify, AWS, Web server, Docker, etc., as you prefer. - -#### [iframe integration](/javascript/v2/get-started/react-sample-app/embed-with-iframe) - -You can use the [deployed](/javascript/v2/get-started/react-sample-app/build-and-deploy) URL in an iframe to embed the whole sample app inside your UI. - -#### [Code structure](https://github.com/100mslive/100ms-web/wiki/code-structure) - -Check the overall project structure of the sample app to understand how the code is organized and understand various components of the app. - -#### [Simple quickstart](/javascript/v2/get-started/react-quickstart) - -Please check our quickstart guide if you're trying to get started with 100ms with a basic app. diff --git a/docs/prebuilt/v2/prebuilt/Appearance.mdx b/docs/prebuilt/v2/prebuilt/Appearance.mdx index 3ddde19cf7..2f44f93184 100644 --- a/docs/prebuilt/v2/prebuilt/Appearance.mdx +++ b/docs/prebuilt/v2/prebuilt/Appearance.mdx @@ -9,7 +9,7 @@ Head to [100ms dashboard](https://dashboard.100ms.live/dashboard), choose an exi ## Changing Logo -You can easily replace the default logo that appears on the Prebuilt interface with your own logo. To do this, click on "Logo" input field and provide the URL of your custom logo image. Ensure that the image meets your desired dimensions. The supported image format is .png. For best results, upload an image with a transparent background. +You can easily replace the default logo that appears on the Prebuilt interface with your own logo. To do this, click on "Logo" input field and upload your custom logo image. 
Ensure that the image meets your desired dimensions. The supported image format is .png. For best results, upload an image with a transparent background.
diff --git a/docs/prebuilt/v2/prebuilt/Screens-and-components.mdx b/docs/prebuilt/v2/prebuilt/Screens-and-components.mdx index e0eb2ea1ad..5d2fd0d450 100644 --- a/docs/prebuilt/v2/prebuilt/Screens-and-components.mdx +++ b/docs/prebuilt/v2/prebuilt/Screens-and-components.mdx @@ -22,13 +22,33 @@ By default, components are enabled based on the role associated with the use cas ### Preview Screen +The Prebuilt preview screen offers a set of components designed to enhance the initial set-up before joining a room. Choose a role to kickstart your customization journey. -#### Header +##### Header Set up a custom header for each roles using Title and Subtitle fields. -#### Join +##### Join Choose how peers join the room - - **Join Now** - Peers from this role will see "Join Now" and would be able join after entering their name. - - **Join and Start Streaming** - Peers from this role will see "Go Live" and would be able to join and start streaming when they join the room. Ensure that this role has the permission to "Start/Stop HLS streaming" as `enabled` under Role permissions on its template configuration. + - **Join Now** - Peers from this role will see 'Join Now' and would be able join after entering their name. + - **Join and Start Streaming** - Peers from this role will see 'Go Live' and would be able to join and start streaming when they join the room. Ensure that this role has the permission to 'Start/Stop HLS streaming' as `enabled` under Role permissions on its template configuration. + +##### Skip Preview + - Turn the toggle to enable or disable skipping the preview screen. + - **Enabled**: When this option is enabled, the preview screen is skipped, and the user is taken directly to the room screen. + + When skip preview is enabled, ensure that the username is passed in the prebuilt links or in the options in the prebuilt component. If not provided, the system will generate a random UUID as the peer's name. 
+ + +##### Noise Cancellation State + - Turn the toggle to define the initial state for Noise Cancellation. + - **Enabled**: When this option is enabled, Noise Cancellation will be turned on by default in preview screen for all the peers of this role. + + Noise Cancellation should be enabled from 'Advanced Settings' under the Template to enable this setting. Learn more about enabling this feature from [here](/get-started/v2/get-started/features/noise-cancellation#enabling-the-noise-cancellation) + + +##### Virtual Background + - Turn the toggle to enable virtual background for this role. Click on 'Upload Image' to add your own image as backgrounds. + Head over [Virtual Background](/prebuilt/v2/prebuilt/virtual-background) for more details. + ### Room Screen The Prebuilt room screen offers a set of components designed to enhance interactivity within a room. Choose a role to kickstart your customization journey. @@ -36,68 +56,104 @@ The Prebuilt room screen offers a set of components designed to enhance interact #### Chat Toggle the chat functionality on or off for a specific role. When chat is disabled for a role, that role won't have access to the chat component within the room. If enabled, you can further fine-tune and customize the functionalities of the Prebuilt UI for chat. -##### Initial State -This setting determines whether the chat component is initially open or closed when a participant joins a session. Keeping the initial state as open can be particularly useful when using the chat overlay view on mobile devices. +##### Chat UI +Customize the prebuilt chat UI + +- ###### Chat Panel Name and Message Placeholder + This setting lets you rename the chat panel name and message placeholder as per your design + +- ###### Initial State + This setting determines whether the chat component is initially open or closed when a participant joins a session. Keeping the initial state as open can be particularly useful when using the chat overlay view on mobile devices. 
+ +- ###### Enable Overlay View + In an overlay view, the chat component overlays on top of video tiles, delivering an immersive chat experience for mobile livestreaming scenarios. Please note that this chat view is exclusively available on mobile devices and is not supported on large-screen applications. + +##### Chat Controls +Customize the prebuilt chat controls -##### Enable Overlay View -In an overlay view, the chat component overlays on top of video tiles, delivering an immersive chat experience for mobile livestreaming scenarios. Please note that this chat view is exclusively available on mobile devices and is not supported on large-screen applications. -##### Allow Pinning messages -Enabling this feature allows the selected role to pin important chat messages on the chat component, making them visible to everyone. +- ###### Allow Pinning messages + Enabling this feature allows the selected role to pin important chat messages on the chat component, making them visible to everyone. +- ###### Allow hiding messages + Enabling this feature allows the selected role to hide the unwanted chat messages, making it invisible to everyone. +- ###### Allow blocking peers in chat + Enabling this feature allows the selected role to block the peer from sending any messages in the chat. -#### Participant List +- ###### Allow pausing chat + Enabling this feature allows the selected role to pause the chat messages during the session, once paused no peer can send any messages in the chat until it is resumed. + +- ###### Public chat + Enabling this feature allows the selected role to send the chat messages as public and everyone in the room can view these messages. + +- ###### Private chat + Enabling this feature allows the selected role to send the chat messages as private to everyone in the room which has the permission to send private messages.
+ +- ###### Role-specific chat + Enabling this feature allows the selected role to send the chat messages to another selected roles + + +##### Participant List Ensure participant list accessibility for roles that require visibility. In certain scenarios, such as large room viewers, there may be no need for them to view other participants, but Hosts or Broadcasters might find it essential to maintain oversight of the participant list. By default, the participant list is enabled for all roles across all templates. -#### Video Tile Layout +##### Video Tile Layout 100ms Prebuilt UI allows tweaks on its default grid layout and supports multiple video layouts. -##### Enable local tile inset -Enabling this makes the local peer's *(for the selected role)* tile in a smaller inset window as default, alternatively if it's disabled, the local tile will be part of the grid view. Join with at least two peers to preview this configuration in action. +- ###### Enable local tile inset + Enabling this makes the local peer's *(for the selected role)* tile in a smaller inset window as default, alternatively if it's disabled, the local tile will be part of the grid view. Join with at least two peers to preview this configuration in action. + +- ###### Prominent roles + Defining one or more roles as prominent gives them higher tile view prominence for the selected role. For example: + - For a 1:1 call, define Host as prominent tile for Guest and vice-versa. + - For a webinar, set Host as the prominent tile for all other roles. Every role would see Host as the primary tile in the room + - For a mobile-first livestreaming view, set Broadcaster as prominent tile for every other role, including Broadcaster themselves. -##### Prominent roles -Defining one or more roles as prominent gives them higher tile view prominence for the selected role. For example: -- For a 1:1 call, define Host as prominent tile for Guest and vice-versa. 
-- For a webinar, set Host as the prominent tile for all other roles. Every role would see Host as the primary tile in the room -- For a mobile-first livestreaming view, set Broadcaster as prominent tile for every other role, including Broadcaster themselves. +- ###### Can spotlight peer + Allow this config for roles who can spotlight others or themselves in the room. Spotlighting a tile reflects for everyone in the room. -##### Can spotlight peer -Allow this config for roles who can spotlight others or themselves in the room. Spotlighting a tile reflects for everyone in the room. + + Assigning a role as 'prominent' for either themselves or other roles establishes a fixed layout that remains unchanged for the entire session. This is particularly well-suited for scenarios where a specific role needs to maintain a constant presence, as seen in webinars and livestreaming. Conversely, spotlighting either their own tile or the tiles of others permits specific participant tiles to temporarily take center stage within the session, allowing for more adaptable layout adjustments as needed + - -Assigning a role as "prominent" for either themselves or other roles establishes a fixed layout that remains unchanged for the entire session. This is particularly well-suited for scenarios where a specific role needs to maintain a constant presence, as seen in webinars and livestreaming. Conversely, spotlighting either their own tile or the tiles of others permits specific participant tiles to temporarily take center stage within the session, allowing for more adaptable layout adjustments as needed. - +- ###### Tiles in view + Select the default number of video tiles that a peer will view in a room. For optimal performance it is recommended to keep it at 9. + + This setting is currently only supported on web. + -#### Be Right Back (BRB) +##### Be Right Back (BRB) Depending on your use case, allow roles to set themselves on BRB mode. 
If disabled, this component will not be visible to the selected role. -#### Emoji Reactions +##### Emoji Reactions Enabling this for a role allows its peers to send emoji reactions in the room. -#### Bring others on Stage +##### Hand Raise +Allows participants with this role to virtually raise their hand during a session, indicating their intention to: request attention or assistance, signal a question or comment, or request to join the stage (if 'Bring others on stage' is enabled). If 'Bring others on stage' is enabled, raising a hand will prompt the session host or moderator to consider changing the participant's role, allowing them to join the stage and actively participate. If 'Bring others on stage' is disabled, the hand raise will serve as a visual cue without any automatic role change. The session host or moderator can acknowledge the raised hand and address the participant's need accordingly. + +##### Bring others on Stage Stage is a virtual space within a room that enables participants to actively engage with their audio and video, and publish it with a much larger audience. With the 'Bring on Stage' feature enabled, viewers who are not on stage can easily interact with the participants on stage, once they have been granted access to the stage. Enable Bring on stage for roles that require the ability to allow or deny stage requests from off-stage participants. By default, this feature is enabled for Hosts, Broadcasters and Co-broadcasters, while viewers with non-publishing roles can use the hand-raise option to request stage access. This component can extend its support to conferencing, webinars and livestreaming scenarios. However, if your use case does not require this feature, you can simply disable it to ensure a seamless experience for all users. -##### Bring on Stage Label +- ###### Bring on Stage Label -Customize text label that Hosts and Broadcasters will see when they receive on stage requests. The default text is "Bring to stage". 
+ Customize text label that Hosts and Broadcasters will see when they receive on stage requests. The default text is "Bring to stage". -##### Remove from Stage Label +- ###### Remove from Stage Label -Customize text label that Hosts and Broadcasters will see when they receive on stage requests. The default text is "Remove from stage". + Customize text label that Hosts and Broadcasters will see when they receive on stage requests. The default text is "Remove from stage". -##### On Stage and Off Stage Role -An off stage participant becomes an on stage participant once their stage access has been accepted. For instance, an off stage role, say Viewer requests a Broadcaster to go on stage. Once their request is accepted, the Viewer can transition to an on stage role, which could be a Viewer-on-stage, a Guest, or a Co-broadcaster. +- ###### On Stage and Off Stage Role + An off stage participant becomes an on stage participant once their stage access has been accepted. For instance, an off stage role, say Viewer requests a Broadcaster to go on stage. Once their request is accepted, the Viewer can transition to an on stage role, which could be a Viewer-on-stage, a Guest, or a Co-broadcaster. -Let's take a quick example to understand this better. + Let's take a quick example to understand this better. 
-Assume a livestreaming scenario with Bring on stage enabled for Broadcasters and Co-Broadcasters, where -- Off-stage participants have been assigned role: Viewer a role with no audio/video publish permissions -- On-stage participants have been assigned role : Guest, a role with audio/video publish permissions + Assume a livestreaming scenario with Bring on stage enabled for Broadcasters and Co-Broadcasters, where + - Off-stage participants have been assigned role: Viewer a role with no audio/video publish permissions + - On-stage participants have been assigned role : Guest, a role with audio/video publish permissions -Once the configuration is set under Bring on stage, join Prebuilt links or apps as these roles: Broadcaster, Co-Broadcaster and Viewer to try bring on stage feature like below: -1. Participants from Viewer role can choose to raise their hand during a session to request to go on stage and interact with Broadcaster(s) and Co-Broadcaster(s) -2. Broadcaster(s) and Co-Broadcaster(s) will receive stage access notification from these off-stage partcipants; where they can choose to accept or deny the stage request. Meanwhile, a Viewer can choose to lower their hand untill their request has been accepted or denied. Broadcaster(s) can also track all such requests under "Hand Raise" section under participant list. -3. When a Broadcaster accepts a stage request from a Viewer(an off-stage participant), the Viewer becomes a Guest (an on-stage participant) as per the configuration set -4. Broadcaster(s) can choose to mute/unmute audio/video for Guest or simply remove them from stage. + Once the configuration is set under Bring on stage, join Prebuilt links or apps as these roles: Broadcaster, Co-Broadcaster and Viewer to try bring on stage feature like below: + 1. Participants from Viewer role can choose to raise their hand during a session to request to go on stage and interact with Broadcaster(s) and Co-Broadcaster(s) + 2. 
Broadcaster(s) and Co-Broadcaster(s) will receive stage access notification from these off-stage participants; where they can choose to accept or deny the stage request. Meanwhile, a Viewer can choose to lower their hand until their request has been accepted or denied. Broadcaster(s) can also track all such requests under "Hand Raise" section under participant list.
+ 3. When a Broadcaster accepts a stage request from a Viewer (an off-stage participant), the Viewer becomes a Guest (an on-stage participant) as per the configuration set
+ 4. Broadcaster(s) can choose to mute/unmute audio/video for Guest or simply remove them from stage.
diff --git a/docs/react-native/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx b/docs/react-native/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx
index 7ad0253911..d348acdbe1 100644
--- a/docs/react-native/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx
+++ b/docs/react-native/v2/how-to-guides/extend-capabilities/noise-cancellation.mdx
@@ -21,7 +21,11 @@ The Noise Cancellation feature employs a sophisticated AI model trained specific
 
 `@100mslive/react-native-hms` version 1.10.2 or later is required to utilize the Noise Cancellation feature in your React Native application.
 
-Also, this feature has gated access currently. To enable Noise Cancellation in your rooms, reach out to **support@100ms.live** or connect with us on [100ms Discord](https://discord.com/invite/kGdmszyzq2).
+
+**IMPORTANT**
+Enable Noise Cancellation in the template configuration. Learn more about enabling this feature from [here](/get-started/v2/get-started/features/noise-cancellation#enabling-the-noise-cancellation) +
+ ## Usage diff --git a/docs/react-native/v2/quickstart/token-endpoint.mdx b/docs/react-native/v2/quickstart/token-endpoint.mdx deleted file mode 100644 index 051b0b7bc9..0000000000 --- a/docs/react-native/v2/quickstart/token-endpoint.mdx +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Auth Token Endpoint Guide -nav: 2.5 ---- - -## Overview - -100ms provides an option to get `Auth Tokens` without setting up a token generation backend service to simplify your integration journey while testing the [sample app](https://github.com/100mslive/100ms-web) or building integration with 100ms. - -You can find the token endpoint from the [developer page](https://dashboard.100ms.live/developer) in your 100ms dashboard. - -![Token endpoint](/guides/token-endpoint-dashboard.png) - -We recommend you move to your token generation service before you transition your app to production, as our token endpoint service will not scale in production. - -The "Sample Apps" built using 100ms client SDKs require an `Auth Token` to join a room to initiate a video conferencing or live streaming session. Please check the [Authentication and Tokens guide](/react-native/v2/foundation/security-and-tokens) - -Please note that you cannot use the token endpoint to create a `Management Token` for server APIs. Refer to the [Management Token section](/react-native/v2/foundation/security-and-tokens#management-token) in Authentication and Tokens guide for more information. - -## Get an auth token using token endpoint - -You can use the token endpoint from your 100ms dashboard while building integration with 100ms. This acts as a tool enabling front-end developers to complete the integration without depending on the backend developers to set up a token generation backend service. - -**URL format:** `api/token` - -100ms token endpoint can generate an Auth token with the inputs passed, such as room_id, role, & user_id (optional - your internal user identifier as the peer's user_id). 
You can use [jwt.io](https://jwt.io/) to validate whether the Auth token contains the same input values. - - - - -```bash -curl --location --request POST 'https://prod-in2.100ms.live/hmsapi/johndoe.app.100ms.live/api/token' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "room_id":"633fcdd84208780bf665346a", - "role":"host", - "user_id":"1234" -}' -``` - - - - -```json -{ - "token": "eyJ0eXAiOiJKV1QiLCJhbGciOi***************************R3tT-Yk", - "msg": "token generated successfully", - "status": 200, - "success": true, - "api_version": "2.0.192" -} -``` - - - -### Example client-side implementation - -You can directly add this to your client-side implementation, check our [sample app](https://github.com/100mslive/react-native-hms/blob/7bd6420ea49d520acd881de9ac5d76b36498bb67/example/src/services/index.ts#L3) for reference. - -### Disable 100ms token endpoint - -Due to some security concerns, if you don't wish to use the token endpoint to generate Auth tokens, then you can disable it on the [Developers page](https://dashboard.100ms.live/developer) on your dashboard by disabling the option "Disable <room_id>/<role> link format." 
- -![Disable Token endpoint](/guides/disable-token-endpoint.png) - -#### Error Response - -Once you're disabled it on the dashboard, the requests to create an Auth token using the 100ms token endpoint will throw the below error: - -```json -{ - "success": false, - "msg": "Generating token using the room_id and role is disabled.", - "api_version": "2.0.192" -} -``` diff --git a/docs/server-side/v2/how-to-guides/Session Initiation Protocol (SIP)/SIP-DTMF-transmission.mdx b/docs/server-side/v2/how-to-guides/Session Initiation Protocol (SIP)/SIP-DTMF-transmission.mdx index 1ec88dc894..fdb41872cf 100644 --- a/docs/server-side/v2/how-to-guides/Session Initiation Protocol (SIP)/SIP-DTMF-transmission.mdx +++ b/docs/server-side/v2/how-to-guides/Session Initiation Protocol (SIP)/SIP-DTMF-transmission.mdx @@ -39,7 +39,7 @@ The DTMF API provided by 100ms enables the transmission of DTMF tones directly t | Parameter | Type | Description | |-----------|-------|-----------------------------------------------------------------------------------------------------------------------------| -| digits | array | An array of strings, where each string is a character representing a DTMF tone. Valid characters are 0-9, *, #, a, b, c, d. | +| digits | array | An array of strings, where each string is a character representing a DTMF tone. Valid characters are 0-9, *, #, a, b, c, d | - **Target Recipients:** The DTMF tones will only be sent to all SIP participants present in the call. WebRTC participants will not receive these tones. - **Transmission Order:** Requests are queued and processed sequentially; subsequent requests will not be processed until all previous tones have been sent. @@ -54,16 +54,23 @@ The DTMF API provided by 100ms enables the transmission of DTMF tones directly t ``` -**400 Bad Request** - When the request is invalid. +**400 Bad Request** - When the request is invalid due to non-acceptable digit. 
```json { - "code": 400, - "message": "no sip participants in the call", - "details": [""] + "code": 400, + "message": "invalid digit" } ``` +**400 Bad Request** - When the request is invalid due to missing digits + +```json +{ + "code": 400, + "message": "digits are mandatory" +} +``` This documentation provides a clear pathway for integrating DTMF transmission capabilities within your applications, ensuring effective interaction with systems requiring numerical input during SIP calls. \ No newline at end of file diff --git a/docs/server-side/v2/how-to-guides/configure-webhooks/webhook.md b/docs/server-side/v2/how-to-guides/configure-webhooks/webhook.md index f59301b617..291e659a4b 100644 --- a/docs/server-side/v2/how-to-guides/configure-webhooks/webhook.md +++ b/docs/server-side/v2/how-to-guides/configure-webhooks/webhook.md @@ -138,19 +138,20 @@ This event will be sent when any peer joins the room successfully #### Attributes -| Name | Type | Description | -| :----------------- | :------------------- | :------------------------------------------------------------------------------------------------------ | -| room_id | `string` | 100ms assigned room id

Example: 5f9edc6ac238215aec2312df | -| room_name | `string` | Room name provided when creating the room

Example: Test Room | -| session_id | `string` | 100ms assigned id to identify the session

Example: 5f9edc6bd238215aec7700df | -| peer_id | `string` | 100ms assigned id to identify the joining user

Example: bd0c76fd-1ab1-4d7d-ab8d-bbfa74b620c4 | -| user_id | `string` | User id assigned by the customer

Example: user.001 | -| template_id | `string` | Template ID of the room

Example: 66112497abcd52312556c4gg | -| user_name | `string` | User name of the joining user

Example: Test user | -| user_data | `string` | User data of the joining user

Example: `{"isHandRaised":true}` | -| role | `string` | Role of the joining user

Example: host | -| joined_at | `timestamp (in UTC)` | Timestamp when user joined

Example: 2020-11-11T16:32:17Z | -| session_started_at | `timestamp (in UTC)` | Timestamp when session started

Example: 2020-11-11T16:32:17Z | +| Name | Type | Description | +|:-------------------|:---------------------|:------------------------------------------------------------------------------------------------------------------------------| +| room_id | `string` | 100ms assigned room id

Example: 5f9edc6ac238215aec2312df | +| room_name | `string` | Room name provided when creating the room

Example: Test Room | +| session_id | `string` | 100ms assigned id to identify the session

Example: 5f9edc6bd238215aec7700df | +| peer_id | `string` | 100ms assigned id to identify the joining user

Example: bd0c76fd-1ab1-4d7d-ab8d-bbfa74b620c4 | +| user_id | `string` | User id assigned by the customer

Example: user.001 | +| template_id | `string` | Template ID of the room

Example: 66112497abcd52312556c4gg | +| user_name | `string` | User name of the joining user

Example: Test user | +| user_data | `string` | User data of the joining user

Example: `{"isHandRaised":true}` | +| role | `string` | Role of the joining user

Example: host | +| joined_at | `timestamp (in UTC)` | Timestamp when user joined

Example: 2020-11-11T16:32:17Z | +| session_started_at | `timestamp (in UTC)` | Timestamp when session started

Example: 2020-11-11T16:32:17Z | +| type | `string` | Defines the type of peer to join the room. It is 'sip' for peers joining through SIP and 'regular' for peers joining directly | #### Sample `peer.join.success` event @@ -169,6 +170,7 @@ This event will be sent when any peer joins the room successfully "room_name": "**********", "session_id": "************************", "template_id": "************************", + "type": "regular", "user_id": "************************", "user_name": "********", "user_data": "", @@ -200,6 +202,7 @@ This event will be sent when peer leaves the room | message | `string` | Reason specified while kicking peer out of room, see more details below

Example: removed due to misconduct | | joined_at | `timestamp (in UTC)` | Timestamp when user joined

Example: 2020-11-11T16:32:17Z | | session_started_at | `timestamp (in UTC)` | Timestamp when session started

Example: 2020-11-11T16:32:17Z | +| type | `string` | Defines the type of peer to join the room. It is 'sip' for peers joining through SIP and 'regular' for peers joining directly | #### Peer Leave Reason @@ -246,6 +249,7 @@ This event will be sent when peer leaves the room "room_name": "**********", "session_id": "************************", "template_id": "************************", + "type": "regular", "user_id": "************************", "user_name": "********", "user_data": "", @@ -263,18 +267,19 @@ This event will be sent when a peer fails to join a room. This can occur when, #### Attributes -| Name | Type | Description | -| :------------ | :------------------- | :------------------------------------------------------------------------------------------------------ | -| room_id | `string` | 100ms assigned room id

Example: 5f9edc6ac238215aec2312df | -| room_name | `string` | Room name provided when creating the room

Example: Test Room | -| peer_id | `string` | 100ms assigned id to identify the joining user

Example: bd0c76fd-1ab1-4d7d-ab8d-bbfa74b620c4 | -| user_id | `string` | User id assigned by the customer

Example: user.001 | -| template_id | `string` | Template ID of the room

Example: 66112497abcd52312556c4gg | -| user_name | `string` | User name of the user

Example: Test user | -| user_data | `string` | User data of the user

Example: `{"isHandRaised":true}` | -| role | `string` | Role of the user

Example: host | -| joined_at | `timestamp (in UTC)` | Timestamp when user joined

Example: 2020-11-11T16:32:17Z | -| error_message | `string` | Reason for failure

Example: Peer not joined | +| Name | Type | Description | +|:--------------|:---------------------|:------------------------------------------------------------------------------------------------------------------------------| +| room_id | `string` | 100ms assigned room id

Example: 5f9edc6ac238215aec2312df | +| room_name | `string` | Room name provided when creating the room

Example: Test Room | +| peer_id | `string` | 100ms assigned id to identify the joining user

Example: bd0c76fd-1ab1-4d7d-ab8d-bbfa74b620c4 | +| user_id | `string` | User id assigned by the customer

Example: user.001 | +| template_id | `string` | Template ID of the room

Example: 66112497abcd52312556c4gg | +| user_name | `string` | User name of the user

Example: Test user | +| user_data | `string` | User data of the user

Example: `{"isHandRaised":true}` | +| role | `string` | Role of the user

Example: host | +| joined_at | `timestamp (in UTC)` | Timestamp when user joined

Example: 2020-11-11T16:32:17Z | +| error_message | `string` | Reason for failure

Example: Peer not joined | +| type | `string` | Defines the type of peer to join the room. It is 'sip' for peers joining through SIP and 'regular' for peers joining directly | #### Peer join failure Reason @@ -299,6 +304,7 @@ This event will be sent when a peer fails to join a room. This can occur when, "room_name": "**********", "user_id": "************************", "template_id": "************************", + "type": "regular", "user_name": "********", "user_data": "", "error_message": "role not allowed" @@ -316,20 +322,21 @@ This event will be sent when the peer leave fails. This can occur when, #### Attributes -| Name | Type | Description | -| :------------ | :------------------- | :------------------------------------------------------------------------------------------------------ | -| room_id | `string` | 100ms assigned room id

Example: 5f9edc6ac238215aec2312df | -| room_name | `string` | Room name provided when creating the room

Example: Test Room | -| peer_id | `string` | 100ms assigned id to identify the joining user

Example: bd0c76fd-1ab1-4d7d-ab8d-bbfa74b620c4 | -| user_id | `string` | User id assigned by the customer

Example: user.001 | -| template_id | `string` | Template ID of the room

Example: 66112497abcd52312556c4gg | -| user_name | `string` | User name of the user

Example: Test user | -| user_data | `string` | User data of the user

Example: `{"isHandRaised":true}` | -| role | `string` | Role of the user

Example: host | -| left_at | `timestamp (in UTC)` | Timestamp when user left

Example: 2020-11-11T17:32:17Z | -| duration | `int` | Duration the user spent in the room in seconds

Example: 36000 | -| error_message | `string` | Reason for failure

Example: Peer not joined | -| joined_at | `timestamp (in UTC)` | Timestamp when user joined

Example: 2020-11-11T16:32:17Z | +| Name | Type | Description | +|:--------------|:---------------------|:------------------------------------------------------------------------------------------------------------------------------| +| room_id | `string` | 100ms assigned room id

Example: 5f9edc6ac238215aec2312df | +| room_name | `string` | Room name provided when creating the room

Example: Test Room | +| peer_id | `string` | 100ms assigned id to identify the joining user

Example: bd0c76fd-1ab1-4d7d-ab8d-bbfa74b620c4 | +| user_id | `string` | User id assigned by the customer

Example: user.001 | +| template_id | `string` | Template ID of the room

Example: 66112497abcd52312556c4gg | +| user_name | `string` | User name of the user

Example: Test user | +| user_data | `string` | User data of the user

Example: `{"isHandRaised":true}` | +| role | `string` | Role of the user

Example: host | +| left_at | `timestamp (in UTC)` | Timestamp when user left

Example: 2020-11-11T17:32:17Z | +| duration | `int` | Duration the user spent in the room in seconds

Example: 36000 | +| error_message | `string` | Reason for failure

Example: Peer not joined | +| joined_at | `timestamp (in UTC)` | Timestamp when user joined

Example: 2020-11-11T16:32:17Z | +| type | `string` | Defines the type of peer to join the room. It is 'sip' for peers joining through SIP and 'regular' for peers joining directly | #### Peer leave failure Reason @@ -354,6 +361,7 @@ This event will be sent when the peer leave fails. This can occur when, "room_name": "**********", "user_id": "************************", "template_id": "************************", + "type": "regular", "user_name": "********", "user_data": "", "error_message": "Peer not joined" diff --git a/docs/server-side/v2/how-to-guides/enable-transcription-and-summary.mdx b/docs/server-side/v2/how-to-guides/enable-transcription-and-summary.mdx index c6a26f05a1..eb8807afe0 100644 --- a/docs/server-side/v2/how-to-guides/enable-transcription-and-summary.mdx +++ b/docs/server-side/v2/how-to-guides/enable-transcription-and-summary.mdx @@ -1,9 +1,9 @@ --- -title: Enable Transcription and Summarisation +title: Post Call Transcription and Summarization nav: 6 --- -This is a guide to enable 100ms **post call transcription** with **speaker labels** and **AI-generated summary**. The feature is currently in `Beta`. +This is a guide to enable 100ms **post call transcription** with **speaker labels** and **AI-generated summary**. In case you're looking to enable **live transcription**, refer to this [documentation](/server-side/v2/how-to-guides/live-transcription-hls). @@ -35,7 +35,7 @@ The above flowchart shows the entire workflow of transcript and summary generati You can enable transcription for all the rooms under a particular template. 1. Access an existing template via the sidebar. -2. Navigate to the `Transcription (Beta)` tab in the template configuration. +2. Navigate to the `Transcription` tab in the template configuration. 3. In the second card which says `Post Call Transcription`, enable the `Transcribe Recordings` toggle. 4. Enabling `Post Call Transcription` will expose an extra configuration called `Output Modes` just below the toggle. 
File format of the transcript output can be set using this. Following file formats are offered: - Text (.txt) @@ -43,7 +43,7 @@ You can enable transcription for all the rooms under a particular template. - Structured (.json) The example output for the above can be seen [here](#example-output-files). -5. In the same card, enable the `Summarise Transcripts` toggle. This will take the default settings for summary. +5. In the same card, enable the `Summarize Transcripts` toggle. This will take the default settings for summary. 6. Save the configuration. 7. Join a room to initiate a session. Start recording (or live stream with recording enabled) using the SDK or API. If it's your first time joining a 100ms room, you'll find the option to `Start Recording` in the created room. For more information on creating room templates, refer to [this documentation](/server-side/v2/api-reference/policy/create-template-via-dashboard). @@ -309,7 +309,11 @@ You can always use 100ms’ Recording Assets API to access the transcripts and s This is not possible at this point of time. But we intend to bring the functionality of re-running transcription and summarization functions to enable users to test and build their own summaries. -4. **What can be done if the speaker label is not working?** +4. Is this a chargeable feature? + + Yes, these are chargeable features. To know more, check our [pricing page](http://100ms.live/pricing). + +5. **What can be done if the speaker label is not working?** There are two possible options here: 1. If you are using an older webSDK version, please update to the latest. Refer to our web documentation [here](/javascript/v2/release-notes/release-notes). 
diff --git a/lib/publishEvents.ts b/lib/publishEvents.ts new file mode 100644 index 0000000000..4e69a5383f --- /dev/null +++ b/lib/publishEvents.ts @@ -0,0 +1,154 @@ +import * as amplitude from '@amplitude/analytics-browser'; +import { getUtmParams } from './getUtmParams'; +import { currentUser } from './currentUser'; + +const getCommonOptions = () => ({ + dashboard_version: process.env.REACT_APP_DASHBOARD_VERSION, + events_protocol: process.env.REACT_APP_EVENTS_PROTOCOL, + timestamp: new Date().toString(), + platform: '100ms-docs', + ...getUtmParams() +}); + +// page analytics + +const hubspotPageView = () => { + const path = window.location.pathname; + // eslint-disable-next-line @typescript-eslint/naming-convention, no-underscore-dangle, no-multi-assign + const _hsq = (window._hsq = window._hsq || []); + if (_hsq) { + _hsq.push(['setPath', path]); + _hsq.push(['trackPageView']); + } +}; + +// identify analytics +const hubspotIdentify = ({ properties }: { properties: {} }) => { + // eslint-disable-next-line @typescript-eslint/naming-convention, no-underscore-dangle, no-multi-assign + const _hsq = (window._hsq = window._hsq || []); + if (_hsq) { + _hsq.push(['identify', { properties }]); + } +}; + +export const analyticsStore: { + data: { workspaceOwnerEmail: null }; + set: (payload: {}) => void; + get: () => {}; +} = { + data: { workspaceOwnerEmail: null }, + set: (payload) => { + for (const index in payload) { + if (Object.prototype.hasOwnProperty.call(payload, index)) { + analyticsStore.data[index] = payload[index]; + } + } + }, + get: () => analyticsStore?.data +}; + +const analyticsTrack = (title, options) => { + try { + const user = currentUser(); + if (!user) { + amplitude.track({ + event_type: title, + event_properties: { + ...getCommonOptions(), + ...options + } + }); + } else if (user && !user.is_admin) { + amplitude.track({ + event_type: title, + event_properties: { + email: user.email, + customer_id: user.customer_id, + + workspaceOwnerEmail: 
(analyticsStore.get() as { workspaceOwnerEmail: string }) + ?.workspaceOwnerEmail, + api_version: user.api_version, + ...getCommonOptions(), + ...options + } + }); + } + } catch (e) { + console.error(e); + } +}; + +const analyticsPage = (title, options) => { + const user = currentUser(); + if (!user) { + try { + hubspotPageView(); + } catch (e) { + console.error(e); + } + try { + window.analytics.page(title, { + ...getCommonOptions(), + ...options + }); + } catch (e) { + console.error(e); + } + } else if (user && !user.is_admin) { + try { + window.analytics.page(title, { + email: user.email, + customer_id: user.customer_id, + api_version: user.api_version, + ...getCommonOptions(), + ...options + }); + } catch (e) { + console.error(e); + } + try { + hubspotPageView(); + } catch (e) { + console.error(e); + } + } +}; + +const amplitudeIdentify = (userId, properties = {}) => { + amplitude.setUserId(userId); + const identifyEvent = new amplitude.Identify(); + for (const key in properties) { + if (Object.prototype.hasOwnProperty.call(properties, key)) { + identifyEvent.set(key, properties[key]); + } + } + amplitude.identify(identifyEvent); +}; + +const analyticsIdentify = (id, options = {}) => { + const user = currentUser(); + if (!user || (user && !user.is_admin)) { + const finalOptions = { + ...getCommonOptions(), + ...options + }; + try { + hubspotIdentify({ + properties: { ...finalOptions, refId: id, email: user.email, ...user } + }); + } catch (e) { + console.error(e); + } + try { + amplitudeIdentify(id, finalOptions); + } catch (e) { + console.error(e); + } + } +}; + +export const AppAnalytics = { + identify: analyticsIdentify, + track: analyticsTrack, + page: analyticsPage +}; diff --git a/package.json b/package.json index ba44669d1e..138a7e2af9 100644 --- a/package.json +++ b/package.json @@ -89,6 +89,7 @@ "dependencies": { "@100mslive/react-icons": "0.4.1-alpha.0", "@100mslive/react-ui": "0.4.1-alpha.0", + "@amplitude/analytics-browser": "^2.11.6", 
"@headlessui/react": "^1.4.0", "@radix-ui/react-select": "^1.2.0", "algoliasearch": "^4.14.3", diff --git a/pages/_app.tsx b/pages/_app.tsx index 8c401ec307..b7c743e1d7 100644 --- a/pages/_app.tsx +++ b/pages/_app.tsx @@ -4,6 +4,7 @@ import { DefaultSeo } from 'next-seo'; import dynamic from 'next/dynamic'; import NProgress from 'nprogress'; import FallbackLayout from '@/layouts/FallbackLayout'; +import * as amplitude from '@amplitude/analytics-browser'; import SEO from '../next-seo.config'; import { currentUser } from '../lib/currentUser'; import '@/styles/custom-ch.css'; @@ -11,11 +12,14 @@ import '@/styles/utils.css'; import '@/styles/nprogress.css'; import '@/styles/theme.css'; import 'inter-ui/inter.css'; +import { AppAnalytics } from '../lib/publishEvents'; declare global { interface Window { // eslint-disable-next-line @typescript-eslint/no-explicit-any analytics: any; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + _hsq: any; } } @@ -29,8 +33,13 @@ const Application = ({ Component, pageProps }) => { const userDetails = currentUser(); const [count, setCount] = useState(0); React.useEffect(() => { + amplitude.init(process.env.NEXT_PUBLIC_AMPLITUDE_API_KEY || '', { + autocapture: { + pageViews: true + } + }); if (!!userDetails && Object.keys(userDetails).length !== 0 && count === 0) { - window.analytics.identify(userDetails.customer_id); + AppAnalytics.identify(userDetails.customer_id, { ...userDetails }); setCount(count + 1); } }, [userDetails]); diff --git a/pages/_document.tsx b/pages/_document.tsx index e92467fa46..58cbb03526 100644 --- a/pages/_document.tsx +++ b/pages/_document.tsx @@ -39,24 +39,11 @@ class MyDocument extends Document { /> {/* To Avoid Flickering */}