diff --git a/pages/blog/[slug].tsx b/components/Presentational/SingleBlog/Blog.tsx similarity index 73% rename from pages/blog/[slug].tsx rename to components/Presentational/SingleBlog/Blog.tsx index 9bf0fe98..2925de02 100644 --- a/pages/blog/[slug].tsx +++ b/components/Presentational/SingleBlog/Blog.tsx @@ -1,18 +1,11 @@ import { useEffect } from 'react' -import Head from 'next/head' import { useRouter } from 'next/router' -import fs from 'fs' import Image from 'next/image' import { LinkedinShareButton, FacebookShareButton } from 'next-share' import { Box, Container, Stack, Typography } from '@mui/material' -import { joinUsLinkIcons, newsAndBlogs } from '../../data/data' -import { BlogPostWithContent, Blog } from '../../types/interfaces' -import { BLOGS_PATH } from '../../utils/constants' -import { - getMarkDownSingleData, - getMarkdownSinglePath, -} from '../../utils/markdown' -import MarkdownText from '../../components/MarkdownText' +import { joinUsLinkIcons, newsAndBlogs } from '../../../data/data' +import MarkdownText from '../../MarkdownText' +import { BlogPost } from '../../../types/interfaces' import Prism from 'prismjs' import 'prismjs/themes/prism-tomorrow.css' import 'prismjs/components/prism-python' @@ -25,30 +18,25 @@ import 'prismjs/components/prism-typescript' import 'prismjs/plugins/normalize-whitespace/prism-normalize-whitespace' import 'prismjs/components/prism-markup-templating' -export default function BlogDetailPage({ - frontmatter: { title, date, cover_image, author }, - content, -}: BlogPostWithContent) { +export default function Blog({ blog }: { blog: BlogPost }) { const router = useRouter() useEffect(() => { Prism.highlightAll() }, []) + if (!blog) return

Loading...

+ return ( <> - - {title} - - - {title} + {blog.title} - {date} + {blog.published_on} 5 min read
- {content} + + {blog.content_md ? blog.content_md : blog.content} +
- Written by {author} + + Written by {blog.blogger} + ) } - -export async function getStaticPaths() { - return getMarkdownSinglePath(fs, BLOGS_PATH) -} - -export async function getStaticProps({ params: { slug } }: Blog) { - return getMarkDownSingleData(fs, BLOGS_PATH, slug) -} diff --git a/components/Shared/layout/Layout.tsx b/components/Shared/layout/Layout.tsx index e7761b20..a518b5ab 100644 --- a/components/Shared/layout/Layout.tsx +++ b/components/Shared/layout/Layout.tsx @@ -8,7 +8,7 @@ const Layout = ({ children }: Props) => { return (
- {children} +
{children}
) diff --git a/components/Shared/layout/layout.scss b/components/Shared/layout/layout.scss index b7719191..abba228a 100644 --- a/components/Shared/layout/layout.scss +++ b/components/Shared/layout/layout.scss @@ -1,5 +1,10 @@ -@media only screen and (max-width: 550px) { - .responsiveness { - width: 700px; - } +.responsiveness { + display: flex; + flex-direction: column; + min-height: 100vh; + width: 100%; +} + +.main-content { + flex: 1; } diff --git a/data/blogdata.js b/data/blogdata.js new file mode 100644 index 00000000..e41baf35 --- /dev/null +++ b/data/blogdata.js @@ -0,0 +1,374 @@ +export const data = [ + { + name: 'annual-lunch-2023', + creation: '2023-08-22 11:00:48.067143', + modified: '2023-08-22 11:04:11.869189', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: 'Annual Lunch 2023', + blog_category: 'activities', + blogger: 'Waleed Raza', + route: 'blog/activities/annual-lunch-2023', + read_time: 3, + published_on: '2023-03-30', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + blog_intro: + "Smiles, get-together, hot lava cakes, favorite dishes, bright winter sun - here's a glimpse of Prixite's Annual Lunch 2023!", + content_type: 'Markdown', + content: null, + content_md: + 'The Sun sparkled as I hustled through the busy roads of Lahore to reach the venue. As usual, I was late! But what was making me more nervous was that I was going to see several completely new faces. In fact all of them! As I entered the courtyard of the restaurant, several people were sitting around a long table. “Are these the ones”? I questioned myself, staring at them trying to match the faces with those on the laptop screens. Smiling oddly, I kept searching for one reaction showing that this was my organization!\n\nYes, I was attending the Annual Lunch of my organization which I had recently joined. 
After much embarrassment, someone towards the end of the table raised hands and I saw one University Junior that I knew. What a relief that was! Everyone seemed so different from their profile pictures! It took a while to finally settle in and have brief Introductions! Oh wait, let’s take a picture first!\n\n![Photo 1](/images/posts/lunch/photo1.jpg)\n\nWhen it comes to Remote Organizations, maintaining a healthy relationship and good work culture is always a hefty task. But those who know the trick, are always able to pull it off like a pro! And this was something I was witnessing right now. Slowly, Introductions kicked in and I became comfortable as little conversations started.\n\n![Photo 2](/images/posts/lunch/photo2.jpg)\n\nEveryone then placed their orders. Discussions on current topics came in and ChatGPT was at the top! We all laughed if ChatGPT would be replacing us soon but the CEO assured us not to worry! During the same, I had the chance to talk to Umair and make an immediate plan for introductions of everyone! After we were done with the sizzling food, sizzling cakes, and brownies were ordered along with tea and coffee. And the cold sunlight, under the sun, made it a perfect combination!\n\n![Lava Cake](/images/posts/lunch/lava.png#image-30)\n\nBefore it was time to go, I announced a surprise! The Gift Boxes from Prixite started coming in and everyone looked at them in awe! “While the CEO hands you the gift, you have to give an introduction of yourself and that must include your hobbies or activities apart from professional life!” I chanted the announcement and gathered everyone to a side. It turned out to be a perfect activity, as we were getting to know a different side of everyone! Surprisingly, half of the team loved playing cricket. 
The boxed gift followed by the Introductions made everyone smile brightly.\n\n![Gift](/images/posts/lunch/gift.jpg#image-30)\n\nIt’s always lovely when remote organizations put effort into arranging such events, bringing people together and helping them bond! Teams are built stronger that way. And this was what Prixite was solely focused on. It reassures the employees that their organization actually exists and so do the colleagues they were talking to over slack. Moreover, one gets to know each other better which is always impossible over a professional call. And with that, the goodbyes kicked in and everyone started leaving slowly. When I left, the sun was setting: an end to a perfect day, with a lot of memories.\n\n![Group Photo](/images/posts/lunch/group_photo.jpg)\n', + content_html: null, + email_sent: 0, + meta_title: 'Annual Lunch 2023', + meta_description: + "Smiles, get-togethers, hot lava cakes, favorite dishes, bright winter sun - here's a glimpse of Prixite's Annual Lunch 2023!", + meta_image: 'images/posts/lunch/group_photo.png', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, + { + name: 'blogger-api-blog', + creation: '2023-08-22 10:55:59.511825', + modified: '2023-08-22 10:57:01.707564', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: 'Blogger API Blog', + blog_category: 'blogger-api', + blogger: 'Aleesha Azhar', + route: 'blog/blogger-api/blogger-api-blog', + read_time: 4, + published_on: '2023-07-14', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + blog_intro: + 'The Blogger API is a powerful tool that allows developers to interact with the Blogger platform, enabling them to create, manage, and access blog content.', + content_type: 'Markdown', + content: null, + content_md: + '####Introduction\n\nBlogger API is a powerful tool provided by Google that allows developers to access and 
interact with blogger blog data. In today\'s digital era, bloggers play a vital role in sharing information, experiences, and opinions with a wide audience. blogging has become an essential part of online content creation. Many content creators and developers use Google\'s Blogger platform to share their thoughts, ideas, and expertise with a broader audience. Integrating the Blogger API with your app can streamline the process of creating, managing, and publishing blog posts directly from your application.\n\nIntegrating the Blogger API with an app allows you to interact with the Blogger platform, enable you to create, read, update, and delete blog posts and comments. Here\'s an overview of Blogger API and how you can integrate the Blogger API with an app:\n\n####Blogger API Concepts\n\nBlogger is built on five basic concepts:\n\nBlogs: Blog has posts and pages. This is the container for blog meta-information like blog name and Description.\n\nPosts: A blog post is the publishable data that the blog author has created. This information is meant to be timely, what the authors want to publish to the world now.\n\nComments: A comment is the place where people other than the blog post author react.\n\nPages: A page is a place for static content, such as biographical information, or the ways to contact the user.\n\nUsers: A user is someone who interacts with Blogger, be they acting as an Author, an Administrator, or just a Reader. For public blogs, readers may be anonymous, but on private blogs a reader must be identified by Blogger.\n\n####Blogger API data model\nA resource is an individual data entity with a unique identifier. 
The Blogger JSON API operates on five types of resources:\n\nBlogs resource: It represents a blog.\n\nPosts resource: It represents a post, each posts resource is a child of a blogs resource.\n\nComments resource: It represents a comment on a specific post, each comments resource is a child of a posts resource.\n\nPages resource: It represents a static page, each pages resource is a child of a blogs resource.\n\nUsers resource: It represents a user. This is used to identify the author of a page, post, or comment.\n\n
\n Photo 1\n
\n####Blogger API operations\n\nYou can invoke different methods in the Blogger API, as described in the following table.\n\n
\n Photo 1\n
\n\n####Prerequisites\n\nBefore we begin, make sure you have the following prerequisites:\n\n1. A Google account with access to Blogger (https://www.blogger.com).\n2. Basic knowledge of web development and APIs (Application Programming Interfaces).\n3. An IDE (Integrated Development Environment) or text editor of your choice.\n\n####Step-by-Step Process to Integrate Blogger API with an App\n\n#####Step 1: Creating your Project\n\nTo access the Blogger API, you need to obtain API credentials. Follow these steps:\n\n1. Go to the Google Developers Console (https://console.developers.google.com/).\n2. Click on "Select a Project" in the top bar and then click "New Project".\n3. Write the name for your project and click "Create."\n4. Once the project is created, then enable the Blogger API.\n5. For enabling Blogger API, search for "Blogger API" in the search bar and click on it.\n6. Click the "Enable" button to activate the Blogger API for your project.\n\n#####Step 2: Create API Credentials\nTo interact with the Blogger API, you need API credentials. Follow these steps to create credentials:\n\n1. Click on "Credential Page"\n
\n Photo 1\n
\n2. After selecting the project, click "Next" to generate your API key.\n
\n Photo 1\n
\n3. When you click on "Show key", it shows the generated API key. Copy the generated API key. You will use it later in your project.\n
\n Photo 1\n
\n\n#####Step 3: Creating your Development Environment\n\nNow that you have your API credentials, it\'s time to set up your development environment. Create a new directory for your project and run the following commands:\n\n1. Initialize a new project\n2. Install the required dependencies\n\n#####Step 4: Make API Requests:\n\n######you can now make API requests to interact with Blogger.\n\n1. Depending on your app\'s requirements, you can create, read, update and delete blog posts.\n\n######Adding Blog Post\nYou can add a post for a blog by sending POST request to the post collection URI.The URI for a blog has the following format:\n\n```javascript\nPOST https://www.googleapis.com/blogger/v3/blogs/2399952233233/posts\n```\n\n######Updating Blog Post\nYou can update a post for a blog by sending a PUT request to the post resource URI. The URI for a blog has the following format:\n\n```javascript\nPUT https://www.googleapis.com/blogger/v3/blogs/2399952233233/posts\n```\n\n######Deleting Blog Post\nYou can delete a post for a blog by sending a DELETE request to the post resource URI. The URI for a blog has the following format:\n\n```javascript\nDELETE https://www.googleapis.com/blogger/v3/blogs/2399952233233/posts\n```\n\n######Retrieving Blog Post\nYou can retrieve information for a particular blog by sending an HTTP GET request to the blog\'s URI. The URI for a blog has the following format:\n\n```javascript\nGET https://www.googleapis.com/blogger/v3/blogs/2399952233233/posts\n```\n\n2. Refer to the documentation for the specific client library to learn about the available methods and endpoints.\n\n#####Step 5: Handle Responses:\n\n1. Process the responses received from the Blogger API according to your app\'s needs.\n2. Handle errors, validate data, and extract relevant information from the API responses.\n\n####Conclusion\n\nIntegrating the Blogger API with your app opens up a world of possibilities for blog management and content creation. 
By following the steps outlined in this guide, you can seamlessly incorporate the power of Blogger into your application. Remember to consult the official documentation(https://developers.google.com/blogger), as it will provide detailed guidance and code examples to facilitate the integration process.\n', + content_html: null, + email_sent: 0, + meta_title: 'Blogger API Blog', + meta_description: + 'The Blogger API is a powerful tool that allows developers to interact with the Blogger platform, enabling them to create, manage, and access', + meta_image: '/images/posts/bloggerapi/blogger.jpg', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, + { + name: 'integrating-scrapy-with-django', + creation: '2023-08-22 11:06:39.228814', + modified: '2023-08-22 11:06:46.397601', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: 'Integrating Scrapy with Django', + blog_category: 'scrappy', + blogger: 'Aiza Tariq', + route: 'blog/scrappy/integrating-scrapy-with-django', + read_time: 3, + published_on: '2023-06-20', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + blog_intro: + "In today's data-driven world, web scraping plays a crucial role in gathering information from various websites.", + content_type: 'Markdown', + content: null, + content_md: + '\n
\n
Integrating Scrapy with Django: A Powerful Combination for Web Scraping and Web Application Development
\n\n
In today\'s data-driven world, web scraping plays a crucial role in gathering information from various websites. Data ranging from contacts and emails to anything versatile like property details or product information can be scraped to build custom solutions.\n\nSimultaneously, web application development using frameworks like Django allows us to build robust and scalable applications. Integrating Scrapy, a powerful web scraping framework, with Django, can provide a seamless workflow for extracting and integrating data into web applications. Let’s explore the process of integrating Scrapy with Django.
\n\n
Step 1: Create and activate the virtual environment
\n\n
\n

python3 -m venv myenv

\n

source myenv/bin/activate

\n
\n\nwhere myenv is the name of the virtual environment\n\n
Step 2: Install Django
\n\nYou can install Django using the following command:\n\n

pip install django

\n\n
Step 3: Create a Django Project
\n\nRun the following command to create a new Django project:\n\n

django-admin startproject project

\n\nReplace "project" with the desired name for your Django project.\n\n
Step 4: Create a new Django App
\n\nNavigate into the project directory using the command:\n\n

cd project

\n\nRun the following command to create a new Django app:\n\n

python3 manage.py startapp app

\n\nReplace "app" with the desired name for your app.\n\n
Step 5: Configure Django Settings
\n\n- Open the settings.py file located inside the project directory.\n- In the INSTALLED_APPS list, add the name of your app (e.g., \'app\') to include it in the project.\n- Configure other project settings such as database connection, and static files, according to your requirements.\n\n
Step 6: Install Scrapy
\n\nTo begin, ensure that Scrapy is installed in your Python environment. You can install it using the following command:\n\n

pip install scrapy

\n\n
Step 7: Create a Scrapy Project
\n\nNavigate to the root folder of your Django project and create a Scrapy project. Run the following command:\n\n

scrapy startproject scraper

\n\nReplace "scraper" with the desired name for your Scrapy project.\n\nRemove the top-level scraper directory so the structure is as follows:\n\n- manage.py\n- scrapy.cfg\n- scraper\n\n
Step 8: Generate a Spider
\n\nYou can start your first spider with the command:\n\n
\n

cd scraper

\n

scrapy genspider website_crawler domain

\n
\n\nReplace "website_crawler" with the desired name for your spider, and "domain" with the target website\'s domain.\n\nOur folder structure will look something like this:\n\n
\n folder structure\n
\n\nAs seen, ** project ** is the name of our Django project, ** app ** is the name of our Django app, ** scraper ** is the name of our Scrapy project and ** website_crawler ** is the name of our spider.\n\n
Step 9: Connect Scrapy to Django
\n\nTo access Django models from Scrapy, establish a connection between the two frameworks. Open the "settings.py" file within the "scraper" folder and add the following code snippet:\n\n
\n

import os

\n

import sys

\n

import django

\n
\n\n

\\# Django Integration

\n\n
\n

sys.path.append(os.path.dirname(os.path.abspath(".")))

\n

os.environ["DJANGO_SETTINGS_MODULE"] = "project.settings"

\n

django.setup()

\n
\n\n
Step 10: Uncomment Pipeline Configuration:
\n\nTo enable the pipeline for processing scraped items and interacting with Django models, uncomment the following lines in the "settings.py" file within the "scraper" folder:\n\n
\n

ITEM_PIPELINES = {

\n

"scraper.pipelines.ScraperPipeline": 300,

\n

}

\n
\n\n
Step 11: Start the Spider
\n\nYou can now run your Scrapy spider from the root directory of your django project with the following command:\n\n

scrapy crawl scraper

\n\nReplace "scraper" with the name of your Scrapy spider.\n\nFollowing these steps will successfully integrate Scrapy with your Django project. Your Scrapy Spider can now access and interact with Django models, enabling seamless data extraction and integration into your Django application. Test it out and leave a comment about how it went for you! Happy Scraping!\n\n
\n', + content_html: null, + email_sent: 0, + meta_title: 'Integrating Scrapy with Django', + meta_description: + "In today's data-driven world, web scraping plays a crucial role in gathering information from various websites.", + meta_image: '/images/posts/integratingscrapy/cover.png', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, + { + name: 'mothers-day', + creation: '2023-08-22 11:08:12.775372', + modified: '2023-08-22 11:08:54.220959', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: "Mother's Day", + blog_category: 'annual-days', + blogger: 'Waleed Raza', + route: 'blog/annual-days/mothers-day', + read_time: 3, + published_on: '2023-05-14', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + blog_intro: + 'Words are bound to fall short when it comes to Mothers and what they go through their entire life for their children.', + content_type: 'Markdown', + content: null, + content_md: + "\nWords are bound to fall short when it comes to Mothers and what they go through their entire life for their children. To sum it up in one day is impossible so Mother’s Day is just a day to highlight how Important Mothers are to us! In our daily lives, we get so busy that often we tend to neglect the little gestures mothers give us and how she cares for us, asking about our health and always trying to serve us food! So Prixite came up with an amazing Idea and asked all its employees to take their Mothers out for dinner to spend some quality time while relieving her of kitchen work!\n\nHere is a glimpse of how it went from our employees' eyes! Let’s live in the moment and hear how the day with their mother went!\n\n##### Uzair\n\nIt was a delightful evening with my mother, filled with warmth and love. We decided to celebrate at a restaurant with special memories for our family.\nThe atmosphere was lovely. 
The restaurant had prepared a special Mother’s Day menu, offering various dishes. The food was amazing and we enjoyed every bite. It was a time of appreciating the love and support my mother has always given me. I am grateful for the opportunity to spend quality time with my mother.\n\n![Photo 1](/images/posts/motherday/photo1.jpg#image-40)\n\n##### Nabeel\n\nI would like to express my heartfelt gratitude for the wonderful Mother's Day hi-tea experience arranged by Prixite. It provided an exceptional opportunity for me to show my appreciation to my mother, who means the world to me.\n\nWe chose Spice Bazaar, as it is my mother's favorite place for hi-tea because of the ambiance and culinary delights. Prixite's thoughtful initiative allowed me to express my gratitude for all that my mother does for me, even though I know I can never fully repay her.\n\nI truly value the company's gesture of appreciation towards my super mom, and I wanted to extend my kudos to Prixite for organizing such a brilliant gesture.\n\n![Photo 2](/images/posts/motherday/photo2.jpg#image-40)\n\n##### Waleed\n\nWhen I told my mother, we would go out, she could not believe it for a while but was very excited. I took my Mother to Golden Bowl, a Chinese restaurant. She had not been there first, so I decided to do something different. The ambiance was super warm and the aesthetics were just the ones mom would love! The food was super great and everyone loved it! For me, it was a great opportunity to bond with my mother and spend time with her!\n\n![Lava Cake](/images/posts/motherday/photo3.jpg#image-20)\n\n##### Maria\n\nMy mother preferred staying at home rather than going out, so I wanted to organize something special for her on Mother's Day. She loves Tehzeeb pizza, so I thought it would be a great idea to have a delicious pizza dinner. It was a wonderful treat for her on her special day, and we thoroughly enjoyed the meal. 
Thank you so much, Prixite!\n\n![Gift](/images/posts/motherday/photo4.png#image-20)\n\n##### Fahad\n\nOn Mother's Day, I had the most wonderful experience celebrating with my family. We spent the day together, expressing our love and gratitude for my amazing mother. We started the day with a delicious homemade breakfast, followed by some thoughtful gifts. In the afternoon, we went for a leisurely walk in the park, enjoying the beautiful weather and cherishing each other's company. It was a day filled with laughter, hugs, and precious moments that I will treasure forever.\n\nWhat made this Mother's Day even more special was the generosity of my company, Prixite. The company's gesture demonstrated its commitment to work-life balance and its appreciation for the importance of family. I am grateful for the support and proud to be part of an organization that values its employees and acknowledges the significance of special occasions like Mother's Day.\n\n##### Naila\n\nThis time my Mother's Day celebration was truly amazing, as I told my mom that my company is going to cover for our treat and she thought that I was kidding with her! We enjoyed a great evening together, and had a nice dinner at Bozo Lahore, followed by a sweet treat at home. It was a lovely gesture from Prixite!\n\n![Group Photo](/images/posts/motherday/photo5.jpg#image-20)\n\nSuch sweet memories garnished, thanks to Mother’s Day. 
Always remember, never wait for this specific day to express your love for your mother!\n", + content_html: null, + email_sent: 0, + meta_title: "Mother's Day", + meta_description: + 'Words are bound to fall short when it comes to Mothers and what they go through their entire life for their children.', + meta_image: '/images/posts/motherday/mDayCover.png', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, + { + name: 'redux-toolkit-vs-context-api-in-react-a-comprehensive-comparison', + creation: '2023-08-22 10:48:16.544048', + modified: '2023-08-22 10:49:04.600479', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: 'Redux Toolkit vs. Context API in React: A Comprehensive Comparison', + blog_category: 'state-management', + blogger: 'Shaheryar', + route: + 'blog/state-management/redux-toolkit-vs-context-api-in-react-a-comprehensive-comparison', + read_time: 5, + published_on: '2023-07-03', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + blog_intro: + 'State management is a crucial aspect of building robust and scalable React applications. Two popular options for state management in React are Redux Toolkit and Context API. In this blog post, we w', + content_type: 'Markdown', + content: '


', + content_md: + "\nState management is a crucial aspect of building robust and scalable React applications. Two popular options for state management in React are Redux Toolkit and Context API. In this blog post, we will explore the differences between these two technologies, discuss when to use each of them, and provide examples to illustrate their usage. We will also address common state management problems and how these libraries help us solve them.\n\n#### Redux Toolkit\n\nRedux Toolkit is a powerful library that simplifies and optimizes Redux development. It provides a set of tools and abstractions that make working with Redux more intuitive and efficient.\n\nAt its core, Redux Toolkit follows the traditional Redux flow, where actions trigger state changes through reducers and the store holds the application state. However, it eliminates boilerplate code and incorporates best practices, making it easier to set up and maintain a Redux application.\n\nOne of the common problems in state management is managing complex state. As an application grows, the state can become large and deeply nested, making it challenging to handle updates and track changes. Redux Toolkit solves this problem by providing a structured way to define reducers and manage slices of the state. It promotes immutability and ensures that state updates are done in a predictable manner.\n\nAnother common issue is handling asynchronous actions, such as making API calls. Redux Toolkit simplifies this by integrating with middleware libraries like Redux Thunk and Redux Saga. 
These middleware allow you to write asynchronous logic and dispatch actions with ease, making it straightforward to handle side effects in your application.\n\nHere's an example of setting up Redux Toolkit in a React application:\n\n```javascript\n// store.js\nimport { configureStore } from '@reduxjs/toolkit'\nimport counterReducer from './counterSlice'\n\nconst store = configureStore({\n reducer: {\n counter: counterReducer,\n },\n})\n\nexport default store\n```\n\nIn the above example, we create a Redux store using configureStore from Redux Toolkit. We pass an object to the reducer property, where counter is a key representing our counter state slice, and counterReducer is the corresponding reducer function responsible for handling state updates.\n\n#### Counter Slice Setup\n\n```javascript\n// counterSlice.js\nimport { createSlice } from '@reduxjs/toolkit'\n\nconst counterSlice = createSlice({\n name: 'counter',\n initialState: 0,\n reducers: {\n increment: (state) => state + 1,\n decrement: (state) => state - 1,\n },\n})\n\nexport const { increment, decrement } = counterSlice.actions\nexport default counterSlice.reducer\n```\n\nIn the above example, we define a counter slice using createSlice from Redux Toolkit. The name property represents the name of the slice, and the initialState property sets the initial value of the counter. The reducers object defines the actions and their corresponding state update logic.\n\nRedux Toolkit simplifies the process of creating actions and reducers by automatically generating action creators based on the defined reducers. In this example, increment and decrement are the generated action creators.\n\n#### Context API\n\nThe Context API is a built-in feature of React that allows you to share state across components without prop drilling. It provides a simple and straightforward way to manage global state in your application.\n\nOne of the problems that the Context API solves is the need to share global state across components. 
In larger applications, passing props down multiple levels of the component tree can become cumbersome and lead to prop drilling. The Context API allows you to create a context and provide values to it, which can be accessed by any component within the context's provider.\n\nLet's set up the same counter app using the Context API:\n\n#### Context Setup\n\n```javascript\n// CounterContext.js\nimport React, { createContext, useState } from 'react'\n\nexport const CounterContext = createContext()\n\nexport const CounterProvider = ({ children }) => {\n const [counter, setCounter] = useState(0)\n\n const increment = () => {\n setCounter((prevCounter) => prevCounter + 1)\n }\n\n const decrement = () => {\n setCounter((prevCounter) => prevCounter - 1)\n }\n\n return (\n \n {children}\n \n )\n}\n```\n\nIn the above example, we create a CounterContext using createContext from React. We also create a CounterProvider component that holds the state and provides the counter, increment, and decrement values to the context.\n\n#### Consuming the Context\n\nTo consume the context values in a component, we use the useContext hook:\n\n```javascript\nimport React, { useContext } from 'react'\nimport { CounterContext } from './CounterContext'\n\nconst Counter = () => {\n const { counter, increment, decrement } = useContext(CounterContext)\n\n return (\n
\n

Counter: {counter}

\n \n \n
\n )\n}\n\nexport default Counter\n```\n\nHowever, the Context API has its limitations. One common challenge is performance degradation when deeply nested components consume context values. Each time a context value changes, all components consuming that context are re-rendered. This can lead to unnecessary re-renders in components that don't depend on the context value. To mitigate this, you can use memoization techniques or opt for more specialized state management solutions like Redux Toolkit.\n\n#### Comparing Redux Toolkit and Context API\n\nWhile Redux Toolkit and the Context API both offer state management solutions in React, there are key differences between them. The choice between the two depends on the specific requirements of your project.\n\n##### When to use Redux Toolkit\n\nRedux Toolkit is a great choice when your application has complex state management needs and requires advanced features and tools. Some scenarios where Redux Toolkit shines include:\n\n- Managing large or deeply nested state trees: Redux Toolkit provides a structured way to handle state updates and ensures a predictable flow of data.\n- Implementing middleware for handling asynchronous actions: Redux Toolkit integrates seamlessly with middleware libraries like Redux Thunk and Redux Saga, making it easy to handle asynchronous logic.\n- Leveraging powerful debugging tools like the Redux DevTools Extension: Redux Toolkit comes with built-in support for the Redux DevTools Extension, which allows you to inspect and time-travel through state changes.\n\n##### When to use the Context API\n\nThe Context API is a suitable choice for smaller applications with simple state management needs. It provides a lightweight solution without the need for additional dependencies or complex setup. 
Consider using the Context API when:\n\n- Your application has a limited amount of shared state: The Context API is well-suited for managing a small number of context values across your application.\n- You want to avoid the overhead of setting up Redux for smaller projects: The Context API provides a simple and straightforward way to manage state without the additional setup required by Redux.\n- Prop drilling becomes cumbersome in your component hierarchy: The Context API eliminates the need to pass props through multiple levels of components, improving code readability and reducing boilerplate.\n\n#### Conclusion\n\nRedux Toolkit and the Context API are both viable options for state management in React applications. The choice between them depends on the specific needs and complexity of your project.\n\nRedux Toolkit excels in managing complex state, handling asynchronous actions, and providing powerful debugging tools. On the other hand, the Context API is a lightweight solution that simplifies state management for smaller applications with simple needs.\n\nBy using these libraries, you can overcome common state management problems and ensure a scalable and maintainable codebase.\n\nRemember, understanding the trade-offs and considering factors such as project size, complexity, and team requirements will help you make an informed decision when choosing between Redux Toolkit and the Context API.\n", + content_html: null, + email_sent: 0, + meta_title: 'Redux Toolkit vs. Context API in React', + meta_description: + 'State management is a crucial aspect of building robust and scalable React applications. 
Two popular options for state management in React a', + meta_image: '/images/redux-contextapi.png', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, + { + name: 'rtk-query-vs-react-query', + creation: '2023-08-22 11:10:39.424507', + modified: '2023-08-22 11:11:23.867000', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: 'RTK Query vs React Query', + blog_category: 'react-query', + blogger: 'Shameen Jamil', + route: 'blog/react-query/rtk-query-vs-react-query', + read_time: 4, + published_on: '2023-07-13', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + blog_intro: + 'RTK Query and React Query: Powerhouse Solutions for State Management in the React Ecosystem.', + content_type: 'Markdown', + content: null, + content_md: + "#### What is RTK query?\n\nRTK Query is a powerful data fetching and caching tool. It is designed to simplify common cases for loading data in a web application, eliminating the need to hand-write data fetching & caching logic yourself. RTK Query is an optional addon included in the Redux Toolkit package, and its functionality is built on top of the other APIs in Redux Toolkit. The Redux core has always been very minimal - it's up to developers to write all the actual logic. That means that Redux has never included anything built in to help solve these use cases. The Redux docs have taught some common patterns for dispatching actions around the request lifecycle to track loading state and request results, and Redux Toolkit's createAsyncThunk API was designed to abstract that typical pattern. However, users still have to write significant amounts of reducer logic to manage the loading state and the cached data.\n\n#### Pros of RTK Query:\n\n
\n - _Caching and Normalization:_ RTK Query provides built-in caching mechanisms to optimize data fetching. It automatically caches API responses and performs intelligent cache invalidation based on the data dependencies.\n - _Automatic Background Refetching:_ RTK Query automatically handles background refetching to ensure your data stays up to date. It can automatically refetch data in the background at configurable intervals or when specific data dependencies change.\n - _Automatic Background Refetching:_ RTK Query automatically handles background refetching to ensure your data stays up to date. It can automatically refetch data in the background at configurable intervals or when specific data dependencies change.\n - _Integration with Redux Toolkit:_ RTK Query integrates seamlessly with Redux Toolkit, leveraging its store, reducers, and middleware. It follows the principles and patterns of Redux Toolkit, making it easy to adopt and integrate into existing Redux applications.\n
\n#### Cons of RTK Query:\n\n
\n- _Limited to use with Redux: _ Unlike other data management solutions, RTK Query can only be used in combination with Redux, thus decreasing its overall adaptability.\n\n- _Steep learning curve for developers new to Redux:_ For developers who are not already familiar with Redux, learning RTK Query may require a steeper learning curve because it requires an understanding of Redux concepts and patterns.\n\n- _Performance Trade-offs:_ RTK Query's automatic caching and background refetching can provide significant performance benefits. However, in certain scenarios with large data sets or complex data relationships, the caching behavior may not be optimal. It's important to carefully configure and test the caching settings to ensure optimal performance for your specific use case.\n
\n\nThis is how we create API Service using createAPI hook\n\n```javascript\n\nimport { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react'\nexport const exampleApi = createApi({\n reducerPath: 'exampleApi',\n baseQuery: fetchBaseQuery({ baseUrl: 'https://example.com/api/' }),\n endpoints: (builder) => ({\n getUserByName: builder.query < User, string> ({\n query: (name) => `user/all`,\n }),\n }),\n})\n```\n\n#### What is react query?\n\nReact Query is a ReactJS preconfigured data management library which gives you power and control over server-side state management, fetching, and caching of data, and error handling in a simple and declarative way without affecting the global state of your application. React Query handles caching, background updates and stale data out of the box with zero-configuration. There's no global state to manage, reducers, normalization systems or heavy configurations to understand. It comes wired up with dedicated devtools, infinite-loading APIs, and first class mutation tools that make updating your data a breeze.\n\n#### Pros of react query:\n\n
\n\n- _Simplified Data Fetching:_ React Query simplifies the process of fetching and managing data by providing a declarative and intuitive API. It abstracts away the complexities of making network requests, handling loading and error states, and managing data caching and synchronization.\n\n- _Automatic caching:_ React Query accelerates data retrieval and optimizes network requests by utilizing an automated caching system.\n\n- _Customization:_ React query provides useMutation hook that provides data creation, edition and deletion.\n\n- _Optimized Query Management:_ React-query works best with pagination as well as fetching and re-fetching data on background. React Query optimizes query management by providing features like query deduplication, query batching, and smart query invalidation.\n
\n\n#### Cons of react query:\n\n
\n\n- _Limited functionality:_ React Query is focused on fetching and managing data from APIs, so it may not be suitable for more complex state management needs.\n\n- _Smaller community:_ Relatively new and may have a smaller community and ecosystem compared to other libraries.\n
\nHere is an example of react query:\n\n```javascript\nimport { QueryClient, QueryClientProvider, useQuery } from 'react-query'\nconst queryClient = new QueryClient()\nexport default function App() {\n return (\n \n \n \n )\n}\nfunction Example() {\n const { isLoading, error, data } = useQuery('repoData', () =>\n fetch('https://api.github.com/repos/tannerlinsley/react-query').then(\n (res) => res.json()\n )\n )\n if (isLoading) return 'Loading...'\n if (error) return 'An error has occurred: ' + error.message\n return (\n
\n

{data.name}

\n

{data.description}

\n
\n )\n}\n```\n\n
\n\n#### Conclusion:\n\nIf your application is more extensive and complex and needs complex state management, RTK is a better choice. If you are building a small, simple application and want to minimize the amount of boilerplate code, React Query may be the way to go.\nReact query is faster than RTK because of it small size but RTK provides excellent features for performance improvement.\nReact Query uses a manual cached key for invalidation and caches by user-defined query keys, while RTK Query uses declarative data invalidations and creates cache keys via endpoints and arguments.\nAt the end, it totally depends on you if you want to use redux go for RTK but if your project is simple go for react query.\n", + content_html: null, + email_sent: 0, + meta_title: 'RTK Query vs React Query', + meta_description: + 'RTK Query and React Query: Powerhouse Solutions for State Management in the React Ecosystem.', + meta_image: '/images/posts/rtk_query_vs_react_query/RTK-query-header.png', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, + { + name: 'scooprank-intro-blog', + creation: '2023-08-22 11:13:49.179001', + modified: '2023-08-22 11:14:03.045828', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: 'ScoopRank Intro Blog', + blog_category: 'scoop-rank', + blogger: 'Umair Khan', + route: 'blog/scoop-rank/scooprank-intro-blog', + read_time: 4, + published_on: '2023-03-30', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + blog_intro: + 'ScoopRank is a deep tech product that ranks entities and tracks trends of entities in real time across internet.', + content_type: 'Markdown', + content: null, + content_md: + "I have always had conversations and debates where one tries to convince the other about the popularity of ‘his’ leader and vice versa! And the result? Always a zero multiplied by zero! 
No one has any stats and evidence to gauge “popularity” or look at how something is trending against the other. And this is where [ScoopRank](https://www.scooprank.com) silently kicks in!\n\nScoopRank searches for all the internet news and scans it to build its database. With its AI-built mechanism, it easily drives insights and generates real-time scoops! Moreover, it can show trending news items, and users can quickly gauge what’s trending and being followed the most, sorting it over categories or dates!\n\nThe days had passed when you had to visit multiple sources for news. ScoopRank shows authentic news from all sources. News is sorted and ranked and most importantly, real-time; showing a glimpse of what is trending the most! The popularity is gauged in the same way.\n\n![Popularity](/images/posts/scooprank/5_leaders.png#image-70)\n\n##### Key Features of ScoopRank\n\nThis was a very brief Introduction to ScoopRank! Let’s dive deeper into the capabilities of ScoopRank!\n\n1. Scoops\n Every second, news is generated and posted online through different sources. A scoop is formed of the news in the same context but can have numerous different sources. ScoopRank then assigns them categories and writes a summary for each one. e.g. news about Fawad Chaudhary’s bail was published through several sources. But only one scoop will form on the same because all of them have the same context.\n ![Scoop Ranking](/images/posts/scooprank/1_Scoops.png#image-70)\n\n2. Sentiment Analysis\n One of the most powerful features of ScoopRank is the sentiment analysis of the entities. It is pertinent to note that this is not the sentiment of the public but is based on the news article; its tone, references, and where the article leads to. For example, the entity “Imran Khan” was searched and the trends appeared over the timeline. 
Hovering over one particular date shows the sentiment analysis of the entity for that day.\n ![Sentiment Analysis](/images/posts/scooprank/sentiment.png#image-70)\n\n3. Popularity\n ScoopRank also tracks the popularity of Political Parties and Political Leaders in the media. So, you can determine at a glance if one leader is popular or the other. Similar cases follow for Political Parties.\n ![Political Parties](/images/posts/scooprank/2_Popularity.png#image-70)\n\n4. Trend Search\n Allows users to search for trends of any entity and display the sentiment of that trend. Hence, whatever is in your mind - just look up and ScoopRank will tell you if it's trending in the news or not!\n ![Entity Trend](/images/posts/scooprank/trend.png#image-70)\n\n5. Real-time Ranking\n You can easily rank Scoops based on different periods such as day, week, and month. So, let's say you can see which news was trending on the 1st for the entire month! Well, that sure can help solve some political arguments based on real-time data!\n Not only that, as the scoops keep pouring in, they are ranked on real-time data. So whenever you visit, the ranking of any particular scoop may have changed! It shows the importance of news in public.\n ![Real time ranking](/images/posts/scooprank/rank.png#image-70)\n\n6. Category\n Scoops can also be easily filtered by category. e.g. You can dive into the entertainment category to have an insight into what’s trending there without bothering about the overall picture.\n ![Category Ranking](/images/posts/scooprank/4_Category.png#image-70)\n\n##### Problems ScoopRank will solve?\n\nSo, the features look cool! But is this providing a solution to any problem? Or is it just another piece of software? The answer lies in the capabilities of ScoopRank and the real-world issues it addresses. Here’s how ScoopRank will make your life easier if you’re interested in:\n\n1. 
Tracking Popularity\n ScoopRank users are individuals or organizations who are interested in tracking the popularity of Political Parties and Leaders. ScoopRank provides a solution for users to access and understand the popularity of these entities in the media easily. And since it is based on real-time data – it solves the problem of authenticity and removes any biasedness factors.\n\n2. Understanding Public Sentiments\n ScoopRank users are business owners or analysts who want to track how the public is perceiving their businesses, products, or services. ScoopRank solves the problem of providing a way for these users to easily access and understand the public sentiment towards their business, product, or service in the media.\n\n3. Marketing Campaign Analysis\n ScoopRank users are marketing teams/Individuals who want to assess the effectiveness of their campaigns. ScoopRank is solving the problem of providing a way for these users to easily track and understand the public sentiment towards their campaigns in the media. That helps one understand whether the campaign gained any traction in the media whether it trended or not.\n\n4. Trending News Stories\n ScoopRank users are people who want to be informed about the current news stories that are popular and trending. ScoopRank is solving the problem of providing users with a way to stay informed about the latest news stories across various categories such as politics, entertainment, sports, technology, etc. Hence, one can see at a glance which news is trending overall or in a specific category.\n\nIn a world of trends, trending news, and hashtags, ScoopRank can prove to be a lifesaver! It can simplify analytics and enable the public to comprehend trends in comparison to others. With one click, you can have trends at your disposal – along with news clippings. And with another click, you can view real-time data on news items and gauge the popularity of any metric! 
Stay Tuned as ScoopRank launches and changes how we look at trending data!\n\n_And this is just the beginning!_\n", + content_html: null, + email_sent: 0, + meta_title: 'ScoopRank Intro Blog', + meta_description: + 'ScoopRank is a deep tech product that ranks entities and tracks trends of entities in real time across internet.', + meta_image: '/images/posts/scooprank/main.png', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, + { + name: 'typescript-from-beginner-to-advanced-a-developers-guide', + creation: '2023-08-22 10:52:17.848767', + modified: '2023-08-22 10:53:18.932396', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: "TypeScript: From Beginner to Advanced - A Developer's Guide", + blog_category: 'typescript', + blogger: 'Shaheryar', + route: + 'blog/typescript/typescript-from-beginner-to-advanced-a-developers-guide', + read_time: 3, + published_on: '2023-07-13', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + blog_intro: + 'TypeScript is a powerful programming language that enhances JavaScript with static typing, object-oriented features, and advanced tooling capabilities.', + content_type: 'Markdown', + content: null, + content_md: + "#### Introduction\n\nTypeScript is a powerful programming language that enhances JavaScript with static typing, object-oriented features, and advanced tooling capabilities. Whether you're just starting out or looking to level up your TypeScript skills, this blog post will guide you from the basics to advanced concepts. Let's dive in!\n\n##### Installing TypeScript\n\nTo get started with TypeScript, you'll need to install it globally on your machine. Open your command line interface and run the following command:\n\n```\nnpm install -g typescript\n```\n\n##### Basic Types\n\nTypeScript introduces static types to JavaScript. 
Let's explore some basic types:\n\n##### Type Inference\n\nTypeScript can infer types based on the assigned values:\n\n```typescript\nlet message: string = 'Hello, TypeScript!'\nlet count: number = 42\nlet isActive: boolean = true\nlet names: string[] = ['Alice', 'Bob', 'Charlie']\nlet tuple: [number, string] = [1, 'TypeScript']\n```\n\n##### Type Inference\n\nTypeScript can infer types based on the assigned values:\n\n```typescript\nlet username = 'John' // Type: string\nlet age = 25 // Type: number\nlet isValid = true // Type: boolean\n```\n\n##### Functions and Parameters\n\nFunctions can have explicit return types and parameter types:\n\n```typescript\nfunction add(a: number, b: number): number {\n return a + b\n}\nconsole.log(add(2, 3)) // Output: 5\n```\n\n##### Interfaces\n\nInterfaces define the shape of objects in TypeScript:\n\n```typescript\ninterface Person {\n name: string\n age: number\n}\n\nfunction greet(person: Person): string {\n return `Hello, ${person.name}! You are ${person.age} years old.`\n}\n\nconst alice: Person = { name: 'Alice', age: 30 }\nconsole.log(greet(alice)) // Output: Hello, Alice! You are 30 years old.\n```\n\n##### Classes and Inheritance\n\nTypeScript supports classes and inheritance:\n\n```typescript\nclass Animal {\n constructor(public name: string) {}\n\n move(distance: number = 0): void {\n console.log(`${this.name} moved ${distance} meters.`)\n }\n}\n\nclass Dog extends Animal {\n bark(): void {\n console.log('Woof! Woof!')\n }\n}\n\nconst dog = new Dog('Buddy')\ndog.move(10) // Output: Buddy moved 10 meters.\ndog.bark() // Output: Woof! 
Woof!\n```\n\n### Advanced TypeScript\n\n##### Generics\n\nGenerics allow you to create reusable components that work with a variety of types:\n\n```typescript\nfunction identity(arg: T): T {\n return arg\n}\n\nlet result = identity('TypeScript') // Type: string\n```\n\n##### Enums\n\nEnums provide a way to define a set of named constants:\n\n```typescript\nenum Color {\n Red,\n Green,\n Blue,\n}\nlet color: Color = Color.Green\nconsole.log(color) // Output: 1\n```\n\n##### Decorators\n\nDecorators enable you to modify classes, methods, and properties at design time:\n\n```typescript\nfunction log(target: any, key: string): void {\n console.log(`Method ${key} is invoked.`)\n}\nclass Calculator {\n @log\n add(a: number, b: number): number {\n return a + b\n }\n}\nconst calc = new Calculator()\ncalc.add(2, 3) // Output: Method add is invoked.\n```\n\n###### Modules and Namespaces\n\nModules and namespaces provide a way to organize and encapsulate code:\n\n```typescript\n// math.ts\nexport function add(a: number, b: number): number {\n return a + b\n}\n// app.ts\nimport { add } from './math'\nconsole.log(add(2, 3)) // Output: 5\n```\n\n##### Type Declarations for External Libraries\n\nTypeScript allows you to add type declarations for external JavaScript libraries:\n\n```typescript\n// my-library.d.ts\ndeclare module 'my-library' {\n export function greet(name: string): void\n}\n// app.ts\nimport { greet } from 'my-library'\ngreet('Alice') // Output: Hello, Alice!\n```\n\n#### Advanced Type Techniques\n\nTypeScript offers advanced type techniques such as conditional types, mapped types, and intersection types. 
These techniques allow you to create complex type transformations and compositions.\n\n#### Best Practices and Tips\n\n- Enable strict type checking (`\"strict\": true` in `tsconfig.json`) to catch potential errors.\n- Use interfaces and types to define clear contracts between components.\n- Leverage TypeScript's editor support for autocompletion, type inference, and refactoring.\n- Document your code using JSDoc-style type annotations to enhance code readability.\n- Regularly update TypeScript to benefit from the latest language features and improvements.\n\n#### Conclusion\n\nCongratulations on completing this journey from beginner to advanced TypeScript! You've learned the fundamentals, explored advanced concepts, and gained insights into best practices. TypeScript's static typing, object-oriented features, and powerful tooling make it a valuable language for building robust and scalable applications. Keep practicing, exploring, and applying TypeScript in your projects to become a proficient TypeScript developer.\n", + content_html: null, + email_sent: 0, + meta_title: "TypeScript: From Beginner to Advanced - A Developer's Guide", + meta_description: + 'TypeScript is a powerful programming language that enhances JavaScript with static typing, object-oriented features, and advanced tooling ca', + meta_image: '/images/1.png', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, + { + name: 'web-scraping-with-chrome-extensions', + creation: '2023-08-22 11:16:17.265329', + modified: '2023-08-22 11:16:27.633198', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: 'Web Scraping with Chrome Extensions', + blog_category: 'scraping', + blogger: 'Umair Jameel', + route: 'blog/scraping/web-scraping-with-chrome-extensions', + read_time: 6, + published_on: '2023-06-20', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + 
blog_intro: + 'Web scraping is the automated extraction of data from websites. No technical knowledge is required when scraping with chrome extensions.', + content_type: 'Markdown', + content: null, + content_md: + '\n#### What is Web Scraping?\n\nWeb scraping is the automated extraction of data from websites. It involves writing software or using tools to gather information from web pages and store it in a structured format for further analysis or use.\n\nTypically, web scraping involves sending HTTP requests to a website and then parsing the HTML or XML content of the web page to extract the desired data. This can be done using programming languages like Python, Node, chrome extensions etc.\n\n#### Chrome extensions for web scraping\n\nThere are a lot of extensions in chrome store for scraping data, but we will be using 2 very useful and highest rated extensions as shared below:\n\n1. [Scraper](https://chrome.google.com/webstore/detail/scraper/mbigbapnjcgaffohmbkdlecaccepngjd)\n2. [Web Scraper](https://chrome.google.com/webstore/detail/web-scraper-free-web-scra/jnhgnonknehpejjnehehllkliplmbmhn)\n\n
\n Photo 1\n
\n
\n Photo 1\n
\n\n#### Let\'s work with Scraper\n\nScraper is a very simple data scraping extension for facilitating online research when you need to get data into spreadsheet with very few clicks and steps. It saves a lot of time when you need to quickly fetch similar kinds of data from a page.\n\nOnce you install this extension in chrome. Open any web page which You want to scrape. Let\'s try it with [imdb](https://www.imdb.com/search/title/?title_type=feature&year=2023-01-01,2023-12-31) for getting the title of movies in 2023.\n\nSo, currenlty there are 50 movies show on first page. We need to select the title of first movie card (make sure you don\'t select the empty white spaces from start or end of title text). Once it is selected, right click on it and click scrape similar link. It will open up a window with all the results.\n\n##### Movies:\n\n
\n Photo 1\n
\n\n##### Results:\n\n
\n Photo 1\n
\nIn the text box, you can see that it selected the inner text of all elements create using //h3/a tags. If you have any idea about HTML, you can modify it as per your requirements.\n\nYou can select any element from any web page. It will find out all other similar elements within that page and make it a part of results, even if there are 1000s of similar data exists on that page.\nFrom the results window, you can copy the results or export the results to Google Drive.\n\nNow let\'s talk about some advance scraping using 2nd extension Web Scraper\n\n#### Let\'s work with Web Scraper now\n\nDownload and install Web Scraper from the link. [Web Scraper](https://chrome.google.com/webstore/detail/web-scraper-free-web-scra/jnhgnonknehpejjnehehllkliplmbmhn)\n\nOnce it is installed in your chrome browser, right click on any web page and click on inspect. Where you will see a new tab with name Web Scraper .\n\n
\n Photo 1\n
\n\nSo, the website which we will be scraping is https://www.whatmobile.com.pk/.\nFirst, we will fetch the links of all the phone brands from home page and then navigate into each of the brands page one by one to get the title and price of each mobile phone.\n\nBefore starting using it, go to the Manage Extension page by right clicking on the extension icon. On this page, add the url of website which you want to scrape and turn the toggles ON as shown in the image below.\n\n
\n Photo 1\n
\n\nNow come back to Web Scraper tab in the inspect window. Click on create new sitemap dropdown and create a new sitemap by entering the url of web page and give it any meaningful name you want. Finally click on Create Sitemap button.\n\n
\n Photo 1\n
\n\nOnce a new sitemap is created, it will show a table with rows of all sitemaps are created. It will show only 1 row as we are just starting using it. If we click on its row, it will take us to another page, showing \\_root link (representing root url of web page).\n\nIt will also show a button Add new selector which we need to click on and create a new selector.\n\n
\n Photo 1\n
\n\nIn the form for creating a new selector, add an id (which I have given the name brands), select Link from dropdown (since we need to navigate to each brands page), click on Multiple checkbox and then click on select button.\n\nOnce you do this, it will show a toolbar over the web page. You will then need to click at least 2 (similar) elements (I have clicked on top 2 brand names). It will then analyse the whole page and automatically select all other similar elements by itself.\n\nFinally, click on green button Done selecting.\n\nOnce you do perform these steps, it will show the selector for brands as shown below.\n\n
\n Photo 1\n
\n\nFinally, click on Save Selector button. It will show a table with all the selector we have created so far (currently only one).\n\nBefore moving forward and add more selectors for the products title and price. Let\'s try to scrape the brands links which we already setup.\nSo now, click on the Sitemap Whatmobile dropdown and click on Scrape link.\n\n
\n Photo 1\n
\n\nIt will take you to next screen, where you can keep the values 2000 in both the textboxes. Click on Start Scraping button. It will open up chrome windows and start scraping the data. You will see the following page containing a table of all brand titles, urls and links toward each page are visible.\n\n
\n Photo 1\n
\n\nNote that if you see any error asking you to use their cloud space. Just click on "Refresh Data" button.\nIf you want to export this sitemap or the data, click on Sitemap Whatmobile dropdown and it will show you the options.\n\nNote, let\'s scrape the titles and prices of each mobile phone in each brands url that you have already scraped.\n\nClick on Sitemap tab from top, it will show you a table with columns ID and Domain. Click on its table first row (currently only 1) and it will take you to next table where you will see the already created brands selector. Click on this row and it will take you to next page, where we need to create the selectors for each brand.\n\nThis is what you will see inside brands page.\n\n
\n Photo 1\n
\n\nThe reason, we will be creating selectors in this window for each mobile product\'s title is because each mobile product belongs to separate brand and brands selector will be the parent of each product selector.\n\nNow first click on any of the brand (e.g. Samsung mobile) from the web page, so we should be able to select the titles of mobile phone while creating the selector in Web Scraper window.\n\nNow follow the same steps that we have discussed above (when we created selector for brands) in order to select the titles of each mobile phone as shown in image below. Finally click on "Save selector" button.\n\n
\n Photo 1\n
\nIn the above image, you can see that I have set the type to Text, because we don\'t want to navigate inside each product, but only want to fetch the title text. You can also see that we have set the parent of this selector to brands but not \\_root as before.\n\nNow when we will run the scraper, it will take around 3-4 minutes to scrape the titles of all the products inside each brands page. Click on Refresh Data button to see the updated results.\n\n
\n Photo 1\n
\n\nThis is awesome 🎉\n\nIn the above image, you can see that it has appended the product\'s title column as well, belongs to its brand title.\n\n##### Your Task:\n\nNow go to the brands page inside Web Scraper where we have created the selector for product title. Create another selector for each mobile product price parallel to product\'s title following the same steps we have followed for product\'s title. Finally run the scraper again and see the results.\n\nThis is how the brands page should look like.\n\n
\n Photo 1\n
\n\nIf we click on "Selector Graph" from the dropdown "Sitemap Whatmobile". We can also see the graph like this:\n\n
\n Photo 1\n
\n\nThis is how we can use these awesome chrome extensions to scrape the data from any website we want and navigate to any page with respect to its parent selectors. Same way, we can add selectors for pagination numbers inside any page and set the parent of it.\n\nThere can be few schnerios on some web pages or some times webpages UI is quite complex that these chrome extension can\'t fullfil our requirements. This is when we have to manually write scripts (in node or python) to scrape the data from web pages. But mostly, I think these extensions fulfill the purpose.\n\nHappy learning 🎉\n', + content_html: null, + email_sent: 0, + meta_title: 'Web Scraping with Chrome Extensions', + meta_description: + 'Web scraping is the automated extraction of data from websites. No technical knowledge is required when scraping with chrome extensions.', + meta_image: '/images/posts/scrapingchromeextensions/thumbnail.png', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, + { + name: 'whats-new-in-nextjs-13-a-developers-guide', + creation: '2023-08-21 16:35:31.731746', + modified: '2023-08-21 18:50:58.219017', + modified_by: 'umair@prixite.com', + owner: 'umair@prixite.com', + docstatus: 0, + idx: 0, + title: "What's new in next.js 13 - A Developer's Guide", + blog_category: 'nextjs', + blogger: 'Umair Jameel', + route: 'blog/react-and-nextjs/whats-new-in-nextjs-13-a-developers-guide', + read_time: 2, + published_on: '2023-08-21', + published: 1, + featured: 0, + hide_cta: 0, + enable_email_notification: 1, + disable_comments: 0, + disable_likes: 0, + blog_intro: + 'Next.js is a popular open-source framework for building modern web applications using React.', + content_type: 'Rich Text', + content: + '

What is Next.js?

Next.js is a popular open-source framework for building web applications with React. Next.js provides a streamlined development experience by offering server-side rendering (SSR), static site generation (SSG), and client-side rendering capabilities.

With Next.js, developers can build modern, dynamic web applications that are optimized for performance and SEO. It simplifies the process of creating server-rendered React applications by providing a structured setup and routing system.


What\'s new in Next.js 13?

Next.js 13 comes with new directory structure for routes and apis along with some new buildin layout and loading files.

Use this command to create a new next.js app with new version: npx create-next-app@latest

During installation, it will show you following prompts:

> What is your project named? my-app
> Would you like to use TypeScript with this project? No / Yes
> Would you like to use ESLint with this project? No / Yes
> Would you like to use Tailwind CSS with this project? No / Yes
> Would you like to use `src/` directory with this project? No / Yes
> Use App Router (recommended)? No / Yes
> Would you like to customize the default import alias? No / Yes

Next.js now ships with TypeScript, ESLint, and Tailwind CSS configuration by default.

These are the major upgrades added in this new version are these:

  1. Typescript by Default
  2. /app Directory for File-Based Routing
  3. React Server Components
  4. Async Components & Data Fetching
  5. Loading and Streaming
  6. Turbopack
  7. Upgrades in next/image, next/link, next/font
  8. MetaData


1. Typescript by Default

“Next.js provides a TypeScript-first development experience for building your React application.

It comes with built-in TypeScript support for automatically installing the necessary packages and configuring the proper settings. As well as a TypeScript Plugin for your editor.”

Read More: Typescript in Next.js 13+


2. /app Directory for File-Based Routing

Next.js 13 includes updated file routing with the new directory. The app directory introduces a new layout, loading structure as well as several new features and improvements.

The directory structure has introduces few changes due to the new routing mechanism. Each path in the route has a dedicated directory with a page.tsx file that serves as the content entry point in Next.js 13.

We can also add layout.tsxerror.tsx and loading.tsx files inside each folder within /app directory.


', + content_md: null, + content_html: null, + email_sent: 0, + meta_title: "What's new in next.js 13 - A Developer's Guide", + meta_description: + 'Next.js is a popular open-source framework for building modern web applications using React.', + meta_image: '/images/posts/whatsnewinnextjs/nextjs13-cover.png', + _user_tags: null, + _comments: null, + _assign: null, + _liked_by: null, + }, +] diff --git a/package-lock.json b/package-lock.json index 9bddb93b..9d0dbd23 100644 --- a/package-lock.json +++ b/package-lock.json @@ -25,6 +25,7 @@ "react-icons": "^4.10.1", "react-slick": "^0.29.0", "react-toastify": "^9.1.3", + "sanitize-html": "^2.11.0", "sass": "^1.63.6", "slick-carousel": "^1.8.1", "styled-components": "^6.0.3" @@ -3358,30 +3359,6 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/babel-plugin-styled-components": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/babel-plugin-styled-components/-/babel-plugin-styled-components-2.0.7.tgz", - "integrity": "sha512-i7YhvPgVqRKfoQ66toiZ06jPNA3p6ierpfUuEWxNF+fV27Uv5gxBkf8KZLHUCc1nFA9j6+80pYoIpqCeyW3/bA==", - "optional": true, - "peer": true, - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.0", - "@babel/helper-module-imports": "^7.16.0", - "babel-plugin-syntax-jsx": "^6.18.0", - "lodash": "^4.17.11", - "picomatch": "^2.3.0" - }, - "peerDependencies": { - "styled-components": ">= 2" - } - }, - "node_modules/babel-plugin-syntax-jsx": { - "version": "6.18.0", - "resolved": "https://registry.npmjs.org/babel-plugin-syntax-jsx/-/babel-plugin-syntax-jsx-6.18.0.tgz", - "integrity": "sha512-qrPaCSo9c8RHNRHIotaufGbuOBN8rtdC4QrrFFc43vyWCCz7Kl7GL1PGaXtMGQZUXrkCjNEgxDfmAuAabr/rlw==", - "optional": true, - "peer": true - }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", @@ -3833,6 +3810,68 @@ "csstype": "^3.0.2" } }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/dom-serializer/node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, "node_modules/electron-to-chromium": { 
"version": "1.4.454", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.454.tgz", @@ -5116,6 +5155,35 @@ "react-is": "^16.7.0" } }, + "node_modules/htmlparser2": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", + "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "node_modules/htmlparser2/node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, "node_modules/ignore": { "version": "5.2.4", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", @@ -5389,6 +5457,14 @@ "node": ">=8" } }, + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/is-regex": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", @@ -5533,12 +5609,6 @@ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true }, - "node_modules/jquery": { - "version": "3.6.3", - "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.6.3.tgz", - "integrity": 
"sha512-bZ5Sy3YzKo9Fyc8wH2iIQK4JImJ6R0GWI9kL1/k7Z91ZBNgkRXE6U0JfHIizZbort8ZunhSI3jw9I6253ahKfg==", - "peer": true - }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", @@ -5702,13 +5772,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "optional": true, - "peer": true - }, "node_modules/lodash.debounce": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", @@ -6202,6 +6265,11 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/parse-srcset": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/parse-srcset/-/parse-srcset-1.0.2.tgz", + "integrity": "sha512-/2qh0lav6CmI15FzA3i/2Bzk2zCgQhGMkvhOhKNcBVQ1ldgpbfiNTVslmooUmWJcADi1f1kIeynbDRVzNlfR6Q==" + }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -6742,6 +6810,19 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/sanitize-html": { + "version": "2.11.0", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.11.0.tgz", + "integrity": "sha512-BG68EDHRaGKqlsNjJ2xUB7gpInPA8gVx/mvjO743hZaeMCZ2DwzW7xvsqZ+KNU4QKwj86HJ3uu2liISf2qBBUA==", + "dependencies": { + "deepmerge": "^4.2.2", + "escape-string-regexp": "^4.0.0", + "htmlparser2": "^8.0.0", + "is-plain-object": "^5.0.0", + "parse-srcset": "^1.0.2", + "postcss": "^8.3.11" + } + }, "node_modules/sass": { "version": "1.63.6", "resolved": "https://registry.npmjs.org/sass/-/sass-1.63.6.tgz", diff --git a/pages/blog/[blogname].tsx b/pages/blog/[blogname].tsx new file mode 100644 index 00000000..99c20b62 --- /dev/null +++ b/pages/blog/[blogname].tsx @@ -0,0 
+1,54 @@ +import Head from 'next/head' +import { BlogPost } from '../../types/interfaces' +import axios from 'axios' +import dynamic from 'next/dynamic' + +export default function BlogDetailPage({ blog }: { blog: BlogPost }) { + const Blog = dynamic( + () => import('../../components/Presentational/SingleBlog/Blog'), + { ssr: false } + ) + + return ( + <> + + {blog?.title} + + + + ) +} + +export async function getServerSideProps({ + params: { blogname }, +}: { + params: { blogname: string } +}) { + try { + const headers = { + Authorization: `token ${process.env.NEXT_PUBLIC_ERP_AUTH_TOKEN}`, + } + const response = await axios.get( + `${process.env.NEXT_PUBLIC_ERP_BASEPATH}/api/resource/Blog%20Post?fields=[%22*%22]&filters=[[%22Blog%20Post%22,%22name%22,%22=%22,%22${blogname}%22]]`, + { + headers, + } + ) + const blogDataArray = response.data.data + const blog = + blogDataArray && blogDataArray.length > 0 ? blogDataArray[0] : null + + return { + props: { + blog: blog, + }, + } + } catch (error) { + console.error('Error fetching blog data:', error) + return { + props: { + blog: null, + }, + } + } +} diff --git a/pages/blogs/index.tsx b/pages/blogs/index.tsx index 7b642dff..8047f500 100644 --- a/pages/blogs/index.tsx +++ b/pages/blogs/index.tsx @@ -1,18 +1,13 @@ import React from 'react' import { Box, Typography, Container, Button } from '@mui/material' import { newsAndBlogs } from '../../data/data' +import { BlogPost } from '../../types/interfaces' import Head from 'next/head' import Link from 'next/link' -import fs from 'fs' -import path from 'path' import Image from 'next/image' -import { BlogPost } from '../../types/interfaces' -import { sortByDate } from '../../utils/sort' -import { MDContent } from '../../types/interfaces' -import { BLOGS_PATH } from '../../utils/constants' -import { getMarkdownAllData } from '../../utils/markdown' +import axios from 'axios' -const Blog = ({ blogs }: MDContent) => { +const Blog = ({ blogs }: { blogs: BlogPost[] }) => { const { 
title, header } = newsAndBlogs return ( @@ -42,7 +37,7 @@ const Blog = ({ blogs }: MDContent) => { {blogs?.map((post: BlogPost, index: number) => (
{ layout="responsive" /> -
Posted on {post.frontmatter.date}
+
Posted on {post.published_on}
-

{post.frontmatter.title}

+

{post.meta_title}

-

{post.frontmatter.excerpt}

+

{post.meta_description}

- + @@ -71,12 +66,30 @@ const Blog = ({ blogs }: MDContent) => { export default Blog export async function getStaticProps() { - const blogFiles = fs.readdirSync(path.join(BLOGS_PATH)) - const blogs = getMarkdownAllData(blogFiles, BLOGS_PATH, fs) - - return { - props: { - blogs: blogs.sort(sortByDate), - }, + try { + const headers = { + Authorization: `token ${process.env.NEXT_PUBLIC_ERP_AUTH_TOKEN}`, + } + const response = await axios.get( + `${process.env.NEXT_PUBLIC_ERP_BASEPATH}/api/resource/Blog%20Post?fields=[%22*%22]`, + { + headers, + } + ) + const blogs = response.data.data?.filter( + (blog: BlogPost) => blog?.published === 1 + ) + return { + props: { + blogs: blogs, + }, + } + } catch (error) { + console.error('Error fetching blog data:', error) + return { + props: { + blogs: [], + }, + } } } diff --git a/pages/index.tsx b/pages/index.tsx index 19fa78d6..bd64619f 100644 --- a/pages/index.tsx +++ b/pages/index.tsx @@ -27,13 +27,15 @@ import { BlogPost, MDContent, Product } from '../types/interfaces' import { sortByDate, sortByIndex } from '../utils/sort' import { getMarkdownAllData, getMarkDownSingleData } from '../utils/markdown' import { - BLOGS_PATH, + // BLOGS_PATH, SERVICES_PATH, TESTIMONIALS_PATH, ABOUT_US_PATH, PRODUCT_PATH, } from '../utils/constants' import { FEATURES } from '../data/features' +// import { data } from '../data/blogdata' +import axios from 'axios' export default function Home({ blogs, @@ -138,12 +140,12 @@ export default function Home({ {newsHeading} - {blogs?.slice(0, 2).map((blog: BlogPost, index: number) => ( + {blogs?.map((blog, index: number) => (
- + image
- Posted on {blog.frontmatter.date} + Posted on {blog.published_on}
- -

{blog.frontmatter.title}

+ +

{blog.title}

-

{blog.frontmatter.excerpt}

+

{blog.blog_intro}

- + Read More
@@ -229,12 +231,26 @@ export default function Home({ } export async function getStaticProps() { - const blogFiles = fs.readdirSync(path.join(BLOGS_PATH)) + let blogs = [] + try { + const headers = { + Authorization: `token ${process.env.NEXT_PUBLIC_ERP_AUTH_TOKEN}`, + } + const response = await axios.get( + `${process.env.NEXT_PUBLIC_ERP_BASEPATH}/api/resource/Blog%20Post?fields=[%22*%22]`, + { + headers, + } + ) + blogs = response.data.data?.filter( + (blog: BlogPost) => blog?.published === 1 + ) + } catch (error) { + console.error('Error fetching blog data:', error) + } const serviceFiles = fs.readdirSync(path.join(SERVICES_PATH)) const testimonialFiles = fs.readdirSync(path.join(TESTIMONIALS_PATH)) const productFiles = fs.readdirSync(path.join(PRODUCT_PATH)) - - const blogs = getMarkdownAllData(blogFiles, BLOGS_PATH, fs) const services = getMarkdownAllData(serviceFiles, SERVICES_PATH, fs) const testimonials = getMarkdownAllData( testimonialFiles, @@ -246,7 +262,7 @@ export async function getStaticProps() { return { props: { - blogs: blogs.sort(sortByDate), + blogs: blogs.slice(0, 2), services: services.sort(sortByIndex), testimonials: testimonials.sort(sortByDate), aboutUs: aboutUs.props, diff --git a/types/interfaces.ts b/types/interfaces.ts index 77c6d18a..c98f97a2 100644 --- a/types/interfaces.ts +++ b/types/interfaces.ts @@ -1,13 +1,13 @@ -export interface BlogPost { - frontmatter: { - cover_image: string - date: string - excerpt: string - title: string - author: string - } - slug: string -} +// export interface BlogPost { +// frontmatter: { +// cover_image: string +// date: string +// excerpt: string +// title: string +// author: string +// } +// slug: string +// } export interface Product { frontmatter: { @@ -148,3 +148,34 @@ export interface SortbyIndexParam { slug: string frontmatter: { [key: string]: string | number } } + +export interface BlogPost { + name: string + creation: string + modified: string + modified_by: string + owner: string + docstatus: 
number + idx: number + title: string + blog_category: string + blogger: string + route: string + read_time: number + published_on: string + published: number + featured: number + hide_cta: number + enable_email_notification: number + disable_comments: number + disable_likes: number + blog_intro: string + content_type: string + content: string + content_md: string + content_html: string + email_sent: number + meta_title: string + meta_description: string + meta_image: string +}